    html_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.HTML, chunk_size=60, chunk_overlap=0)html_docs = html_splitter.create_documents([html_text])html_docs

Here's an example using the C# text splitter:

    C_CODE = """
    using System;
    class Program
    {
        static void Main()
        {
            int age = 30; // Change the age value as needed

            // Categorize the age without any console output
            if (age < 18)
            {
                // Age is under 18
            }
            else if (age >= 18 && age < 65)
            {
                // Age is an adult
            }
            else
            {
                // Age is a senior citizen
            }
        }
    }
    """
    c_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.CSHARP, chunk_size=128, chunk_overlap=0)c_docs = c_splitter.create_documents([C_CODE])c_docs

    [Document(page_content='using System;'), Document(page_content='class Program\n{\n static void Main()\n {\n int age = 30; // Change the age value as needed'), Document(page_content='// Categorize the age without any console output\n if (age < 18)\n {\n // Age is under 18'), Document(page_content='}\n else if (age >= 18 && age < 65)\n {\n // Age is an adult\n }\n else\n {'), Document(page_content='// Age is a senior citizen\n }\n }\n}')]

Haskell[​](#haskell "Direct link to Haskell")
---------------------------------------------

Here's an example using the Haskell text splitter:

    HASKELL_CODE = """main :: IO ()main = do putStrLn "Hello, World!"-- Some sample functionsadd :: Int -> Int -> Intadd x y = x + y"""haskell_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.HASKELL, chunk_size=50, chunk_overlap=0)haskell_docs = haskell_splitter.create_documents([HASKELL_CODE])haskell_docs

    [Document(page_content='main :: IO ()'), Document(page_content='main = do\n putStrLn "Hello, World!"\n-- Some'), Document(page_content='sample functions\nadd :: Int -> Int -> Int\nadd x y'), Document(page_content='= x + y')]

    system = """Generate a relevant search query for a library system"""prompt = ChatPromptTemplate.from_messages( [ ("system", system), ("human", "{question}"), ])corrective_structure_llm = llm.with_structured_output(Search)corrective_query_analyzer = ( {"question": RunnablePassthrough()} | prompt | corrective_structure_llm)

    corrective_query_analyzer.invoke("what are books about aliens by jes knight")

    Search(query='books about aliens', author='Jesse Knight')

    # TODO: show trigram similarity
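One way to read the example above: the analyzer extracts a free-form author, and the closest valid name is then swapped in. Below is a minimal sketch of that correction step, assuming a `vectorstore` that indexes the valid author names and the `Search` schema used above are available; the helper name is illustrative and not part of the original page.

    def correct_author(search: Search) -> Search:
        # Hypothetical helper: replace the extracted author with the closest
        # canonical name found via embedding similarity search.
        if search.author is None:
            return search
        closest = vectorstore.similarity_search(search.author, k=1)[0].page_content
        return search.copy(update={"author": closest})

    # e.g. correct_author(Search(query="books about aliens", author="jes knight"))
    # would be expected to return Search(query='books about aliens', author='Jesse Knight')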
Deprecations and Breaking Changes
=================================

This page lists deprecations and removals in the `langchain` and `langchain-core` packages.

New features and improvements are not listed here. See the [overview](/v0.2/docs/versions/overview/) for a summary of what's new in this release.

Breaking changes[​](#breaking-changes "Direct link to Breaking changes")
------------------------------------------------------------------------

As of release 0.2.0, `langchain` is required to be integration-agnostic. This means that code in `langchain` should not by default instantiate any specific chat models, LLMs, embedding models, vectorstores etc; instead, the user will be required to specify those explicitly.

The following functions and classes require an explicit LLM to be passed as an argument:

* `langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreToolkit`
* `langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreRouterToolkit`
* `langchain.chains.openai_functions.get_openapi_chain`
* `langchain.chains.router.MultiRetrievalQAChain.from_retrievers`
* `langchain.indexes.VectorStoreIndexWrapper.query`
* `langchain.indexes.VectorStoreIndexWrapper.query_with_sources`
* `langchain.indexes.VectorStoreIndexWrapper.aquery_with_sources`
* `langchain.chains.flare.FlareChain`

The following classes now require passing an explicit Embedding model as an argument:

* `langchain.indexes.VectorstoreIndexCreator`

The following code has been removed:

* `langchain.natbot.NatBotChain.from_default` removed in favor of the `from_llm` class method.

Behavior was changed for the following code:

### @tool decorator[​](#tool-decorator "Direct link to @tool decorator")

The `@tool` decorator now assigns the function doc-string as the tool description. Previously, the `@tool` decorator used to prepend the function signature.

Before 0.2.0:

    @tooldef my_tool(x: str) -> str: """Some description.""" return "something"print(my_tool.description)

Would result in: `my_tool: (x: str) -> str - Some description.`

As of 0.2.0:

It will result in: `Some description.`

Code that moved to another package[​](#code-that-moved-to-another-package "Direct link to Code that moved to another package")
------------------------------------------------------------------------------------------------------------------------------

Code that was moved from `langchain` into another package (e.g., `langchain-community`).

If you try to import it from `langchain`, the import will keep on working, but will raise a deprecation warning. The warning will provide a replacement import statement.

    python -c "from langchain.document_loaders.markdown import UnstructuredMarkdownLoader"

    LangChainDeprecationWarning: Importing UnstructuredMarkdownLoader from langchain.document_loaders is deprecated.
    Please replace deprecated imports:
    >> from langchain.document_loaders import UnstructuredMarkdownLoader
    with new imports of:
    >> from langchain_community.document_loaders import UnstructuredMarkdownLoader
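Applied to this example, the fix is a one-line import change; a minimal sketch (the old import keeps working for now but emits the warning above, and the file path is illustrative):

    # Deprecated (still works until removal, but raises LangChainDeprecationWarning):
    # from langchain.document_loaders import UnstructuredMarkdownLoader

    # Preferred import from the package the code now lives in:
    from langchain_community.document_loaders import UnstructuredMarkdownLoader

    loader = UnstructuredMarkdownLoader("README.md")  # illustrative path
    docs = loader.load()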
We will continue supporting the imports in `langchain` until release 0.4 as long as the relevant package where the code lives is installed. (e.g., as long as `langchain_community` is installed.)

However, we advise users not to rely on these imports and instead migrate to the new imports. To help with this process, we're releasing a migration script via the LangChain CLI. See further instructions in the migration guide.

Code targeted for removal[​](#code-targeted-for-removal "Direct link to Code targeted for removal")
---------------------------------------------------------------------------------------------------

Code that has better alternatives available and will eventually be removed, so there's only a single way to do things. (e.g., the `predict_messages` method in ChatModels has been deprecated in favor of `invoke`).

### astream events V1[​](#astream-events-v1 "Direct link to astream events V1")

If you are using `astream_events`, please review how to [migrate to astream events v2](/v0.2/docs/versions/v0_2/migrating_astream_events/).

### langchain\_core[​](#langchain_core "Direct link to langchain_core")

#### try\_load\_from\_hub[​](#try_load_from_hub "Direct link to try_load_from_hub")

In module: `utils.loading` Deprecated: 0.1.30 Removal: 0.3.0

Alternative: Using the hwchase17/langchain-hub repo for prompts is deprecated. Please use [https://smith.langchain.com/hub](https://smith.langchain.com/hub) instead.

#### BaseLanguageModel.predict[​](#baselanguagemodelpredict "Direct link to BaseLanguageModel.predict")

In module: `language_models.base` Deprecated: 0.1.7 Removal: 0.3.0

Alternative: invoke

#### BaseLanguageModel.predict\_messages[​](#baselanguagemodelpredict_messages "Direct link to BaseLanguageModel.predict_messages")

In module: `language_models.base` Deprecated: 0.1.7 Removal: 0.3.0

Alternative: invoke

#### BaseLanguageModel.apredict[​](#baselanguagemodelapredict "Direct link to BaseLanguageModel.apredict")

In module: `language_models.base` Deprecated: 0.1.7 Removal: 0.3.0

Alternative: ainvoke

#### BaseLanguageModel.apredict\_messages[​](#baselanguagemodelapredict_messages "Direct link to BaseLanguageModel.apredict_messages")

In module: `language_models.base` Deprecated: 0.1.7 Removal: 0.3.0

Alternative: ainvoke

#### RunTypeEnum[​](#runtypeenum "Direct link to RunTypeEnum")

In module: `tracers.schemas` Deprecated: 0.1.0 Removal: 0.3.0

Alternative: Use string instead.

#### TracerSessionV1Base[​](#tracersessionv1base "Direct link to TracerSessionV1Base")

In module: `tracers.schemas` Deprecated: 0.1.0 Removal: 0.3.0

Alternative:

#### TracerSessionV1Create[​](#tracersessionv1create "Direct link to TracerSessionV1Create")

In module: `tracers.schemas` Deprecated: 0.1.0 Removal: 0.3.0

Alternative:

#### TracerSessionV1[​](#tracersessionv1 "Direct link to TracerSessionV1")

In module: `tracers.schemas` Deprecated: 0.1.0 Removal: 0.3.0

Alternative:

#### TracerSessionBase[​](#tracersessionbase "Direct link to TracerSessionBase")

In module: `tracers.schemas` Deprecated: 0.1.0 Removal: 0.3.0

Alternative:

#### TracerSession[​](#tracersession "Direct link to TracerSession")

In module: `tracers.schemas` Deprecated: 0.1.0 Removal: 
0.3.0\n\nAlternative:\n\n#### BaseRun[​](#baserun \"Direct link to BaseRun\")\n\nIn module: `tracers.schemas` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: Run\n\n#### LLMRun[​](#llmrun \"Direct link to LLMRun\")\n\nIn module: `tracers.schemas` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: Run\n\n#### ChainRun[​](#chainrun \"Direct link to ChainRun\")\n\nIn module: `tracers.schemas` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: Run\n\n#### ToolRun[​](#toolrun \"Direct link to ToolRun\")\n\nIn module: `tracers.schemas` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: Run\n\n#### BaseChatModel.**call**[​](#basechatmodelcall \"Direct link to basechatmodelcall\")\n\nIn module: `language_models.chat_models` Deprecated: 0.1.7 Removal: 0.3.0\n\nAlternative: invoke\n\n#### BaseChatModel.call\\_as\\_llm[​](#basechatmodelcall_as_llm \"Direct link to BaseChatModel.call_as_llm\")\n\nIn module: `language_models.chat_models` Deprecated: 0.1.7 Removal: 0.3.0\n\nAlternative: invoke\n\n#### BaseChatModel.predict[​](#basechatmodelpredict \"Direct link to BaseChatModel.predict\")\n\nIn module: `language_models.chat_models` Deprecated: 0.1.7 Removal: 0.3.0\n\nAlternative: invoke\n\n#### BaseChatModel.predict\\_messages[​](#basechatmodelpredict_messages \"Direct link to BaseChatModel.predict_messages\")\n\nIn module: `language_models.chat_models` Deprecated: 0.1.7 Removal: 0.3.0\n\nAlternative: invoke\n\n#### BaseChatModel.apredict[​](#basechatmodelapredict \"Direct link to BaseChatModel.apredict\")\n\nIn module: `language_models.chat_models` Deprecated: 0.1.7 Removal: 0.3.0\n\nAlternative: ainvoke\n\n#### BaseChatModel.apredict\\_messages[​](#basechatmodelapredict_messages \"Direct link to BaseChatModel.apredict_messages\")\n\nIn module: `language_models.chat_models` Deprecated: 0.1.7 Removal: 0.3.0\n\nAlternative: ainvoke\n\n#### BaseLLM.**call**[​](#basellmcall \"Direct link to basellmcall\")\n\nIn module: `language_models.llms` Deprecated: 0.1.7 Removal: 0.3.0\n\nAlternative: invoke\n\n#### BaseLLM.predict[​](#basellmpredict \"Direct link to BaseLLM.predict\")\n\nIn module: `language_models.llms` Deprecated: 0.1.7 Removal: 0.3.0\n\nAlternative: invoke\n\n#### BaseLLM.predict\\_messages[​](#basellmpredict_messages \"Direct link to BaseLLM.predict_messages\")\n\nIn module: `language_models.llms` Deprecated: 0.1.7 Removal: 0.3.0\n\nAlternative: invoke\n\n#### BaseLLM.apredict[​](#basellmapredict \"Direct link to BaseLLM.apredict\")\n\nIn module: `language_models.llms` Deprecated: 0.1.7 Removal: 0.3.0\n\nAlternative: ainvoke\n\n#### BaseLLM.apredict\\_messages[​](#basellmapredict_messages \"Direct link to BaseLLM.apredict_messages\")\n\nIn module: `language_models.llms` Deprecated: 0.1.7 Removal: 0.3.0\n\nAlternative: ainvoke\n\n#### BaseRetriever.get\\_relevant\\_documents[​](#baseretrieverget_relevant_documents \"Direct link to BaseRetriever.get_relevant_documents\")\n\nIn module: `retrievers` Deprecated: 0.1.46 Removal: 0.3.0\n\nAlternative: invoke\n\n#### BaseRetriever.aget\\_relevant\\_documents[​](#baseretrieveraget_relevant_documents \"Direct link to BaseRetriever.aget_relevant_documents\")\n\nIn module: `retrievers` Deprecated: 0.1.46 Removal: 0.3.0\n\nAlternative: ainvoke\n\n#### ChatPromptTemplate.from\\_role\\_strings[​](#chatprompttemplatefrom_role_strings \"Direct link to ChatPromptTemplate.from_role_strings\")\n\nIn module: `prompts.chat` Deprecated: 0.0.1 Removal:\n\nAlternative: from\\_messages classmethod\n\n#### ChatPromptTemplate.from\\_strings[​](#chatprompttemplatefrom_strings 
\"Direct link to ChatPromptTemplate.from_strings\")\n\nIn module: `prompts.chat` Deprecated: 0.0.1 Removal:\n\nAlternative: from\\_messages classmethod\n\n#### BaseTool.**call**[​](#basetoolcall \"Direct link to basetoolcall\")\n\nIn module: `tools` Deprecated: 0.1.47 Removal: 0.3.0\n\nAlternative: invoke\n\n#### convert\\_pydantic\\_to\\_openai\\_function[​](#convert_pydantic_to_openai_function \"Direct link to convert_pydantic_to_openai_function\")\n\nIn module: `utils.function_calling` Deprecated: 0.1.16 Removal: 0.3.0\n\nAlternative: langchain\\_core.utils.function\\_calling.convert\\_to\\_openai\\_function()\n\n#### convert\\_pydantic\\_to\\_openai\\_tool[​](#convert_pydantic_to_openai_tool \"Direct link to convert_pydantic_to_openai_tool\")\n\nIn module: `utils.function_calling` Deprecated: 0.1.16 Removal: 0.3.0\n\nAlternative: langchain\\_core.utils.function\\_calling.convert\\_to\\_openai\\_tool()\n\n#### convert\\_python\\_function\\_to\\_openai\\_function[​](#convert_python_function_to_openai_function \"Direct link to convert_python_function_to_openai_function\")\n\nIn module: `utils.function_calling` Deprecated: 0.1.16 Removal: 0.3.0\n\nAlternative: langchain\\_core.utils.function\\_calling.convert\\_to\\_openai\\_function()\n\n#### format\\_tool\\_to\\_openai\\_function[​](#format_tool_to_openai_function \"Direct link to format_tool_to_openai_function\")\n\nIn module: `utils.function_calling` Deprecated: 0.1.16 Removal: 0.3.0\n\nAlternative: langchain\\_core.utils.function\\_calling.convert\\_to\\_openai\\_function()\n\n#### format\\_tool\\_to\\_openai\\_tool[​](#format_tool_to_openai_tool \"Direct link to format_tool_to_openai_tool\")\n\nIn module: `utils.function_calling` Deprecated: 0.1.16 Removal: 0.3.0\n\nAlternative: langchain\\_core.utils.function\\_calling.convert\\_to\\_openai\\_tool()\n\n### langchain[​](#langchain \"Direct link to langchain\")\n\n#### AgentType[​](#agenttype \"Direct link to AgentType\")\n\nIn module: `agents.agent_types` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: Use [LangGraph](/v0.2/docs/how_to/migrate_agent/) or new agent constructor methods like create\\_react\\_agent, create\\_json\\_agent, create\\_structured\\_chat\\_agent, etc.\n\n#### Chain.**call**[​](#chaincall \"Direct link to chaincall\")\n\nIn module: `chains.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: invoke\n\n#### Chain.acall[​](#chainacall \"Direct link to Chain.acall\")\n\nIn module: `chains.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: ainvoke\n\n#### Chain.run[​](#chainrun-1 \"Direct link to Chain.run\")\n\nIn module: `chains.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: invoke\n\n#### Chain.arun[​](#chainarun \"Direct link to Chain.arun\")\n\nIn module: `chains.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: ainvoke\n\n#### Chain.apply[​](#chainapply \"Direct link to Chain.apply\")\n\nIn module: `chains.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: batch\n\n#### LLMChain[​](#llmchain \"Direct link to LLMChain\")\n\nIn module: `chains.llm` Deprecated: 0.1.17 Removal: 0.3.0\n\nAlternative: [RunnableSequence](/v0.2/docs/how_to/sequence/), e.g., `prompt | llm`\n\n#### LLMSingleActionAgent[​](#llmsingleactionagent \"Direct link to LLMSingleActionAgent\")\n\nIn module: `agents.agent` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: Use [LangGraph](/v0.2/docs/how_to/migrate_agent/) or new agent constructor methods like create\\_react\\_agent, create\\_json\\_agent, create\\_structured\\_chat\\_agent, etc.\n\n#### Agent[​](#agent 
\"Direct link to Agent\")\n\nIn module: `agents.agent` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: Use [LangGraph](/v0.2/docs/how_to/migrate_agent/) or new agent constructor methods like create\\_react\\_agent, create\\_json\\_agent, create\\_structured\\_chat\\_agent, etc.\n\n#### OpenAIFunctionsAgent[​](#openaifunctionsagent \"Direct link to OpenAIFunctionsAgent\")\n\nIn module: `agents.openai_functions_agent.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: create\\_openai\\_functions\\_agent\n\n#### ZeroShotAgent[​](#zeroshotagent \"Direct link to ZeroShotAgent\")\n\nIn module: `agents.mrkl.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: create\\_react\\_agent\n\n#### MRKLChain[​](#mrklchain \"Direct link to MRKLChain\")\n\nIn module: `agents.mrkl.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative:\n\n#### ConversationalAgent[​](#conversationalagent \"Direct link to ConversationalAgent\")\n\nIn module: `agents.conversational.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: create\\_react\\_agent\n\n#### ConversationalChatAgent[​](#conversationalchatagent \"Direct link to ConversationalChatAgent\")\n\nIn module: `agents.conversational_chat.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: create\\_json\\_chat\\_agent\n\n#### ChatAgent[​](#chatagent \"Direct link to ChatAgent\")\n\nIn module: `agents.chat.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: create\\_react\\_agent\n\n#### OpenAIMultiFunctionsAgent[​](#openaimultifunctionsagent \"Direct link to OpenAIMultiFunctionsAgent\")\n\nIn module: `agents.openai_functions_multi_agent.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: create\\_openai\\_tools\\_agent\n\n#### ReActDocstoreAgent[​](#reactdocstoreagent \"Direct link to ReActDocstoreAgent\")\n\nIn module: `agents.react.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative:\n\n#### DocstoreExplorer[​](#docstoreexplorer \"Direct link to DocstoreExplorer\")\n\nIn module: `agents.react.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative:\n\n#### ReActTextWorldAgent[​](#reacttextworldagent \"Direct link to ReActTextWorldAgent\")\n\nIn module: `agents.react.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative:\n\n#### ReActChain[​](#reactchain \"Direct link to ReActChain\")\n\nIn module: `agents.react.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative:\n\n#### SelfAskWithSearchAgent[​](#selfaskwithsearchagent \"Direct link to SelfAskWithSearchAgent\")\n\nIn module: `agents.self_ask_with_search.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: create\\_self\\_ask\\_with\\_search\n\n#### SelfAskWithSearchChain[​](#selfaskwithsearchchain \"Direct link to SelfAskWithSearchChain\")\n\nIn module: `agents.self_ask_with_search.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative:\n\n#### StructuredChatAgent[​](#structuredchatagent \"Direct link to StructuredChatAgent\")\n\nIn module: `agents.structured_chat.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: create\\_structured\\_chat\\_agent\n\n#### RetrievalQA[​](#retrievalqa \"Direct link to RetrievalQA\")\n\nIn module: `chains.retrieval_qa.base` Deprecated: 0.1.17 Removal: 0.3.0\n\nAlternative: [create\\_retrieval\\_chain](https://api.python.langchain.com/en/latest/chains/langchain.chains.retrieval.create_retrieval_chain.html#langchain-chains-retrieval-create-retrieval-chain)\n\n#### load\\_agent\\_from\\_config[​](#load_agent_from_config \"Direct link to load_agent_from_config\")\n\nIn module: `agents.loading` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative:\n\n#### 
load\\_agent[​](#load_agent \"Direct link to load_agent\")\n\nIn module: `agents.loading` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative:\n\n#### initialize\\_agent[​](#initialize_agent \"Direct link to initialize_agent\")\n\nIn module: `agents.initialize` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: Use [LangGraph](/v0.2/docs/how_to/migrate_agent/) or new agent constructor methods like create\\_react\\_agent, create\\_json\\_agent, create\\_structured\\_chat\\_agent, etc.\n\n#### XMLAgent[​](#xmlagent \"Direct link to XMLAgent\")\n\nIn module: `agents.xml.base` Deprecated: 0.1.0 Removal: 0.3.0\n\nAlternative: create\\_xml\\_agent\n\n#### CohereRerank[​](#coherererank \"Direct link to CohereRerank\")\n\nIn module: `retrievers.document_compressors.cohere_rerank` Deprecated: 0.0.30 Removal: 0.3.0\n\nAlternative: langchain\\_cohere.CohereRerank\n\n#### ConversationalRetrievalChain[​](#conversationalretrievalchain \"Direct link to ConversationalRetrievalChain\")\n\nIn module: `chains.conversational_retrieval.base` Deprecated: 0.1.17 Removal: 0.3.0\n\nAlternative: [create\\_history\\_aware\\_retriever](https://api.python.langchain.com/en/latest/chains/langchain.chains.history_aware_retriever.create_history_aware_retriever.html) together with [create\\_retrieval\\_chain](https://api.python.langchain.com/en/latest/chains/langchain.chains.retrieval.create_retrieval_chain.html#langchain-chains-retrieval-create-retrieval-chain) (see example in docstring)\n\n#### create\\_extraction\\_chain\\_pydantic[​](#create_extraction_chain_pydantic \"Direct link to create_extraction_chain_pydantic\")\n\nIn module: `chains.openai_tools.extraction` Deprecated: 0.1.14 Removal: 0.3.0\n\nAlternative: [with\\_structured\\_output](/v0.2/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling.\n\n#### create\\_openai\\_fn\\_runnable[​](#create_openai_fn_runnable \"Direct link to create_openai_fn_runnable\")\n\nIn module: `chains.structured_output.base` Deprecated: 0.1.14 Removal: 0.3.0\n\nAlternative: [with\\_structured\\_output](/v0.2/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling.\n\n#### create\\_structured\\_output\\_runnable[​](#create_structured_output_runnable \"Direct link to create_structured_output_runnable\")\n\nIn module: `chains.structured_output.base` Deprecated: 0.1.17 Removal: 0.3.0\n\nAlternative: [with\\_structured\\_output](/v0.2/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling.\n\n#### create\\_openai\\_fn\\_chain[​](#create_openai_fn_chain \"Direct link to create_openai_fn_chain\")\n\nIn module: `chains.openai_functions.base` Deprecated: 0.1.1 Removal: 0.3.0\n\nAlternative: create\\_openai\\_fn\\_runnable\n\n#### create\\_structured\\_output\\_chain[​](#create_structured_output_chain \"Direct link to create_structured_output_chain\")\n\nIn module: `chains.openai_functions.base` Deprecated: 0.1.1 Removal: 0.3.0\n\nAlternative: ChatOpenAI.with\\_structured\\_output\n\n#### create\\_extraction\\_chain[​](#create_extraction_chain \"Direct link to create_extraction_chain\")\n\nIn module: `chains.openai_functions.extraction` Deprecated: 0.1.14 Removal: 0.3.0\n\nAlternative: [with\\_structured\\_output](/v0.2/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling.\n\n#### create\\_extraction\\_chain\\_pydantic[​](#create_extraction_chain_pydantic-1 
\"Direct link to create_extraction_chain_pydantic\")\n\nIn module: `chains.openai_functions.extraction` Deprecated: 0.1.14 Removal: 0.3.0\n\nAlternative: [with\\_structured\\_output](/v0.2/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling.\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/versions/v0_2/deprecations.mdx)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nastream\\_events v2\n\n](/v0.2/docs/versions/v0_2/migrating_astream_events/)[\n\nNext\n\nSecurity\n\n](/v0.2/docs/security/)\n\n* [Breaking changes](#breaking-changes)\n * [@tool decorator](#tool-decorator)\n* [Code that moved to another package](#code-that-moved-to-another-package)\n* [Code targeted for removal](#code-targeted-for-removal)\n * [astream events V1](#astream-events-v1)\n * [langchain\\_core](#langchain_core)\n * [langchain](#langchain)"},"last_modified":{"kind":"null"}}},{"rowIdx":1328,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/document_loader_custom/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* Custom Document Loader\n\nOn this page\n\nHow to create a custom Document Loader\n======================================\n\nOverview[​](#overview \"Direct link to Overview\")\n------------------------------------------------\n\nApplications based on LLMs frequently entail extracting data from databases or files, like PDFs, and converting it into a format that LLMs can utilize. In LangChain, this usually involves creating Document objects, which encapsulate the extracted text (`page_content`) along with metadata—a dictionary containing details about the document, such as the author's name or the date of publication.\n\n`Document` objects are often formatted into prompts that are fed into an LLM, allowing the LLM to use the information in the `Document` to generate a desired response (e.g., summarizing the document). `Documents` can be either used immediately or indexed into a vectorstore for future retrieval and use.\n\nThe main abstractions for Document Loading are:\n\nComponent\n\nDescription\n\nDocument\n\nContains `text` and `metadata`\n\nBaseLoader\n\nUse to convert raw data into `Documents`\n\nBlob\n\nA representation of binary data that's located either in a file or in memory\n\nBaseBlobParser\n\nLogic to parse a `Blob` to yield `Document` objects\n\nThis guide will demonstrate how to write custom document loading and file parsing logic; specifically, we'll see how to:\n\n1. Create a standard document Loader by sub-classing from `BaseLoader`.\n2. Create a parser using `BaseBlobParser` and use it in conjunction with `Blob` and `BlobLoaders`. 
Standard Document Loader[​](#standard-document-loader "Direct link to Standard Document Loader")
------------------------------------------------------------------------------------------------

A document loader can be implemented by sub-classing from a `BaseLoader` which provides a standard interface for loading documents.

### Interface[​](#interface "Direct link to Interface")

| Method Name | Explanation |
| --- | --- |
| lazy\_load | Used to load documents one by one **lazily**. Use for production code. |
| alazy\_load | Async variant of `lazy_load` |
| load | Used to load all the documents into memory **eagerly**. Use for prototyping or interactive work. |
| aload | Used to load all the documents into memory **eagerly**. Use for prototyping or interactive work. **Added in 2024-04 to LangChain.** |

* The `load` method is a convenience method meant solely for prototyping work -- it just invokes `list(self.lazy_load())`.
* The `alazy_load` method has a default implementation that will delegate to `lazy_load`. If you're using async, we recommend overriding the default implementation and providing a native async implementation.

::: {.callout-important} When implementing a document loader do **NOT** provide parameters via the `lazy_load` or `alazy_load` methods.

All configuration is expected to be passed through the initializer (`__init__`). This was a design choice made by LangChain to make sure that once a document loader has been instantiated it has all the information needed to load documents. :::

### Implementation[​](#implementation "Direct link to Implementation")

Let's create an example of a standard document loader that loads a file and creates a document from each line in the file.

    from typing import AsyncIterator, Iteratorfrom langchain_core.document_loaders import BaseLoaderfrom langchain_core.documents import Documentclass CustomDocumentLoader(BaseLoader): """An example document loader that reads a file line by line.""" def __init__(self, file_path: str) -> None: """Initialize the loader with a file path. Args: file_path: The path to the file to load. """ self.file_path = file_path def lazy_load(self) -> Iterator[Document]: # <-- Does not take any arguments """A lazy loader that reads a file line by line. When you're implementing lazy load methods, you should use a generator to yield documents one by one. """ with open(self.file_path, encoding="utf-8") as f: line_number = 0 for line in f: yield Document( page_content=line, metadata={"line_number": line_number, "source": self.file_path}, ) line_number += 1 # alazy_load is OPTIONAL. # If you leave out the implementation, a default implementation which delegates to lazy_load will be used!
async def alazy_load( self, ) -> AsyncIterator[Document]: # <-- Does not take any arguments \"\"\"An async lazy loader that reads a file line by line.\"\"\" # Requires aiofiles # Install with `pip install aiofiles` # https://github.com/Tinche/aiofiles import aiofiles async with aiofiles.open(self.file_path, encoding=\"utf-8\") as f: line_number = 0 async for line in f: yield Document( page_content=line, metadata={\"line_number\": line_number, \"source\": self.file_path}, ) line_number += 1\n\n**API Reference:**[BaseLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_core.document_loaders.base.BaseLoader.html) | [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html)\n\n### Test 🧪[​](#test- \"Direct link to Test 🧪\")\n\nTo test out the document loader, we need a file with some quality content.\n\n with open(\"./meow.txt\", \"w\", encoding=\"utf-8\") as f: quality_content = \"meow meow🐱 \\n meow meow🐱 \\n meow😻😻\" f.write(quality_content)loader = CustomDocumentLoader(\"./meow.txt\")\n\n ## Test out the lazy load interfacefor doc in loader.lazy_load(): print() print(type(doc)) print(doc)\n\n page_content='meow meow🐱 \\n' metadata={'line_number': 0, 'source': './meow.txt'}page_content=' meow meow🐱 \\n' metadata={'line_number': 1, 'source': './meow.txt'}page_content=' meow😻😻' metadata={'line_number': 2, 'source': './meow.txt'}\n\n ## Test out the async implementationasync for doc in loader.alazy_load(): print() print(type(doc)) print(doc)\n\n page_content='meow meow🐱 \\n' metadata={'line_number': 0, 'source': './meow.txt'}page_content=' meow meow🐱 \\n' metadata={'line_number': 1, 'source': './meow.txt'}page_content=' meow😻😻' metadata={'line_number': 2, 'source': './meow.txt'}\n\n::: {.callout-tip}\n\n`load()` can be helpful in an interactive environment such as a jupyter notebook.\n\nAvoid using it for production code since eager loading assumes that all the content can fit into memory, which is not always the case, especially for enterprise data. :::\n\n loader.load()\n\n [Document(page_content='meow meow🐱 \\n', metadata={'line_number': 0, 'source': './meow.txt'}), Document(page_content=' meow meow🐱 \\n', metadata={'line_number': 1, 'source': './meow.txt'}), Document(page_content=' meow😻😻', metadata={'line_number': 2, 'source': './meow.txt'})]\n\nWorking with Files[​](#working-with-files \"Direct link to Working with Files\")\n------------------------------------------------------------------------------\n\nMany document loaders invovle parsing files. The difference between such loaders usually stems from how the file is parsed rather than how the file is loaded. For example, you can use `open` to read the binary content of either a PDF or a markdown file, but you need different parsing logic to convert that binary data into text.\n\nAs a result, it can be helpful to decouple the parsing logic from the loading logic, which makes it easier to re-use a given parser regardless of how the data was loaded.\n\n### BaseBlobParser[​](#baseblobparser \"Direct link to BaseBlobParser\")\n\nA `BaseBlobParser` is an interface that accepts a `blob` and outputs a list of `Document` objects. A `blob` is a representation of data that lives either in memory or in a file. 
LangChain's Python implementation has a `Blob` primitive which is inspired by the [Blob WebAPI spec](https://developer.mozilla.org/en-US/docs/Web/API/Blob).

    from langchain_core.document_loaders import BaseBlobParser, Blobclass MyParser(BaseBlobParser): """A simple parser that creates a document from each line.""" def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Parse a blob into a document line by line.""" line_number = 0 with blob.as_bytes_io() as f: for line in f: line_number += 1 yield Document( page_content=line, metadata={"line_number": line_number, "source": blob.source}, )

**API Reference:**[BaseBlobParser](https://api.python.langchain.com/en/latest/document_loaders/langchain_core.document_loaders.base.BaseBlobParser.html) | [Blob](https://api.python.langchain.com/en/latest/document_loaders/langchain_core.document_loaders.blob_loaders.Blob.html)

    blob = Blob.from_path("./meow.txt")parser = MyParser()

    list(parser.lazy_parse(blob))

    [Document(page_content='meow meow🐱 \n', metadata={'line_number': 1, 'source': './meow.txt'}), Document(page_content=' meow meow🐱 \n', metadata={'line_number': 2, 'source': './meow.txt'}), Document(page_content=' meow😻😻', metadata={'line_number': 3, 'source': './meow.txt'})]

Using the **blob** API also allows one to load content directly from memory without having to read it from a file!

    blob = Blob(data=b"some data from memory\nmeow")list(parser.lazy_parse(blob))

    [Document(page_content='some data from memory\n', metadata={'line_number': 1, 'source': None}), Document(page_content='meow', metadata={'line_number': 2, 'source': None})]

### Blob[​](#blob "Direct link to Blob")

Let's take a quick look through some of the Blob API.

    blob = Blob.from_path("./meow.txt", metadata={"foo": "bar"})

    blob.encoding

    'utf-8'

    blob.as_bytes()

    b'meow meow\xf0\x9f\x90\xb1 \n meow meow\xf0\x9f\x90\xb1 \n meow\xf0\x9f\x98\xbb\xf0\x9f\x98\xbb'

    blob.as_string()

    'meow meow🐱 \n meow meow🐱 \n meow😻😻'

    blob.as_bytes_io()

    

    blob.metadata

    {'foo': 'bar'}

    blob.source

    './meow.txt'

### Blob Loaders[​](#blob-loaders "Direct link to Blob Loaders")

While a parser encapsulates the logic needed to parse binary data into documents, _blob loaders_ encapsulate the logic that's necessary to load blobs from a given storage location.

At the moment, `LangChain` only supports `FileSystemBlobLoader`.

You can use the `FileSystemBlobLoader` to load blobs and then use the parser to parse them.

    from langchain_community.document_loaders.blob_loaders import FileSystemBlobLoaderblob_loader = FileSystemBlobLoader(path=".", glob="*.mdx", show_progress=True)

**API Reference:**[FileSystemBlobLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.blob_loaders.file_system.FileSystemBlobLoader.html)

    parser = MyParser()for blob in blob_loader.yield_blobs(): for doc in parser.lazy_parse(blob): print(doc) break

    0%| | 0/8 [00:00<?, ?it/s]

    page_content='[The Microsoft Office](https://www.office.com/) suite of productivity software includes Microsoft Word, Microsoft Excel, Microsoft PowerPoint, Microsoft Outlook, and Microsoft OneNote. It is available for Microsoft Windows and macOS operating systems. It is also available on Android and iOS.\n' metadata={'line_number': 3, 'source': 'office_file.mdx'}page_content='\n' metadata={'line_number': 4, 'source': 'office_file.mdx'}page_content='This covers how to load commonly used file formats including `DOCX`, `XLSX` and `PPTX` documents into a document format that we can use downstream.\n' metadata={'line_number': 5, 'source': 'office_file.mdx'}... output truncated for demo purposes
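The custom loader below builds on `GenericLoader`, which simply pairs a blob loader with a blob parser. For reference, here is a minimal sketch (not part of the page as captured here) of using `GenericLoader.from_filesystem` directly with the `MyParser` defined above:

    from langchain_community.document_loaders.generic import GenericLoader

    # GenericLoader wires a blob loader (here: a filesystem loader) to a parser.
    loader = GenericLoader.from_filesystem(
        path=".", glob="*.mdx", show_progress=True, parser=MyParser()
    )

    for idx, doc in enumerate(loader.lazy_load()):
        if idx < 5:
            print(doc)
    print("... output truncated for demo purposes")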
#### Custom Generic Loader[​](#custom-generic-loader "Direct link to Custom Generic Loader")

If you really like creating classes, you can sub-class and create a class to encapsulate the logic together.

You can sub-class from this class to load content using an existing loader.

    from typing import Anyclass MyCustomLoader(GenericLoader): @staticmethod def get_parser(**kwargs: Any) -> BaseBlobParser: """Override this method to associate a default parser with the class.""" return MyParser()

    loader = MyCustomLoader.from_filesystem(path=".", glob="*.mdx", show_progress=True)for idx, doc in enumerate(loader.lazy_load()): if idx < 5: print(doc)print("... output truncated for demo purposes")

    0%| | 0/8 [00:00<?, ?it/s]

    page_content='[The Microsoft Office](https://www.office.com/) suite of productivity software includes Microsoft Word, Microsoft Excel, Microsoft PowerPoint, Microsoft Outlook, and Microsoft OneNote. It is available for Microsoft Windows and macOS operating systems. It is also available on Android and iOS.\n' metadata={'line_number': 3, 'source': 'office_file.mdx'}page_content='\n' metadata={'line_number': 4, 'source': 'office_file.mdx'}page_content='This covers how to load commonly used file formats including `DOCX`, `XLSX` and `PPTX` documents into a document format that we can use downstream.\n' metadata={'line_number': 5, 'source': 'office_file.mdx'}... output truncated for demo purposes

How to do retrieval with contextual compression
===============================================

One challenge with retrieval is that usually you don't know the specific queries your document storage system will face when you ingest data into the system.
This means that the information most relevant to a query may be buried in a document with a lot of irrelevant text. Passing that full document through your application can lead to more expensive LLM calls and poorer responses.\n\nContextual compression is meant to fix this. The idea is simple: instead of immediately returning retrieved documents as-is, you can compress them using the context of the given query, so that only the relevant information is returned. “Compressing” here refers to both compressing the contents of an individual document and filtering out documents wholesale.\n\nTo use the Contextual Compression Retriever, you'll need:\n\n* a base retriever\n* a Document Compressor\n\nThe Contextual Compression Retriever passes queries to the base retriever, takes the initial documents and passes them through the Document Compressor. The Document Compressor takes a list of documents and shortens it by reducing the contents of documents or dropping documents altogether.\n\nGet started[​](#get-started \"Direct link to Get started\")\n---------------------------------------------------------\n\n # Helper function for printing docsdef pretty_print_docs(docs): print( f\"\\n{'-' * 100}\\n\".join( [f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)] ) )\n\nUsing a vanilla vector store retriever[​](#using-a-vanilla-vector-store-retriever \"Direct link to Using a vanilla vector store retriever\")\n------------------------------------------------------------------------------------------------------------------------------------------\n\nLet's start by initializing a simple vector store retriever and storing the 2023 State of the Union speech (in chunks). We can see that given an example question our retriever returns one or two relevant docs and a few irrelevant docs. And even the relevant docs have a lot of irrelevant information in them.\n\n from langchain_community.document_loaders import TextLoaderfrom langchain_community.vectorstores import FAISSfrom langchain_openai import OpenAIEmbeddingsfrom langchain_text_splitters import CharacterTextSplitterdocuments = TextLoader(\"state_of_the_union.txt\").load()text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)texts = text_splitter.split_documents(documents)retriever = FAISS.from_documents(texts, OpenAIEmbeddings()).as_retriever()docs = retriever.invoke(\"What did the president say about Ketanji Brown Jackson\")pretty_print_docs(docs)\n\n**API Reference:**[TextLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.text.TextLoader.html) | [FAISS](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) | [CharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.CharacterTextSplitter.html)\n\n Document 1:Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. 
One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.----------------------------------------------------------------------------------------------------Document 2:A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. We’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.----------------------------------------------------------------------------------------------------Document 3:And for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. And soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. So tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. First, beat the opioid epidemic.----------------------------------------------------------------------------------------------------Document 4:Tonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers. And as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up. That ends on my watch. Medicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect. We’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees. Let’s pass the Paycheck Fairness Act and paid leave. Raise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty. 
Let’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges.\n\nAdding contextual compression with an `LLMChainExtractor`[​](#adding-contextual-compression-with-an-llmchainextractor \"Direct link to adding-contextual-compression-with-an-llmchainextractor\")\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\nNow let's wrap our base retriever with a `ContextualCompressionRetriever`. We'll add an `LLMChainExtractor`, which will iterate over the initially returned documents and extract from each only the content that is relevant to the query.\n\n from langchain.retrievers import ContextualCompressionRetrieverfrom langchain.retrievers.document_compressors import LLMChainExtractorfrom langchain_openai import OpenAIllm = OpenAI(temperature=0)compressor = LLMChainExtractor.from_llm(llm)compression_retriever = ContextualCompressionRetriever( base_compressor=compressor, base_retriever=retriever)compressed_docs = compression_retriever.invoke( \"What did the president say about Ketanji Jackson Brown\")pretty_print_docs(compressed_docs)\n\n**API Reference:**[ContextualCompressionRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.contextual_compression.ContextualCompressionRetriever.html) | [LLMChainExtractor](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.chain_extract.LLMChainExtractor.html) | [OpenAI](https://api.python.langchain.com/en/latest/llms/langchain_openai.llms.base.OpenAI.html)\n\n Document 1:I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson.\n\nMore built-in compressors: filters[​](#more-built-in-compressors-filters \"Direct link to More built-in compressors: filters\")\n-----------------------------------------------------------------------------------------------------------------------------\n\n### `LLMChainFilter`[​](#llmchainfilter \"Direct link to llmchainfilter\")\n\nThe `LLMChainFilter` is slightly simpler but more robust compressor that uses an LLM chain to decide which of the initially retrieved documents to filter out and which ones to return, without manipulating the document contents.\n\n from langchain.retrievers.document_compressors import LLMChainFilter_filter = LLMChainFilter.from_llm(llm)compression_retriever = ContextualCompressionRetriever( base_compressor=_filter, base_retriever=retriever)compressed_docs = compression_retriever.invoke( \"What did the president say about Ketanji Jackson Brown\")pretty_print_docs(compressed_docs)\n\n**API Reference:**[LLMChainFilter](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.chain_filter.LLMChainFilter.html)\n\n Document 1:Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. 
One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n\n### `EmbeddingsFilter`[​](#embeddingsfilter \"Direct link to embeddingsfilter\")\n\nMaking an extra LLM call over each retrieved document is expensive and slow. The `EmbeddingsFilter` provides a cheaper and faster option by embedding the documents and query and only returning those documents which have sufficiently similar embeddings to the query.\n\n from langchain.retrievers.document_compressors import EmbeddingsFilterfrom langchain_openai import OpenAIEmbeddingsembeddings = OpenAIEmbeddings()embeddings_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)compression_retriever = ContextualCompressionRetriever( base_compressor=embeddings_filter, base_retriever=retriever)compressed_docs = compression_retriever.invoke( \"What did the president say about Ketanji Jackson Brown\")pretty_print_docs(compressed_docs)\n\n**API Reference:**[EmbeddingsFilter](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.embeddings_filter.EmbeddingsFilter.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html)\n\n Document 1:Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.----------------------------------------------------------------------------------------------------Document 2:A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. 
We’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.\n\nStringing compressors and document transformers together[​](#stringing-compressors-and-document-transformers-together \"Direct link to Stringing compressors and document transformers together\")\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\nUsing the `DocumentCompressorPipeline` we can also easily combine multiple compressors in sequence. Along with compressors we can add `BaseDocumentTransformer`s to our pipeline, which don't perform any contextual compression but simply perform some transformation on a set of documents. For example `TextSplitter`s can be used as document transformers to split documents into smaller pieces, and the `EmbeddingsRedundantFilter` can be used to filter out redundant documents based on embedding similarity between documents.\n\nBelow we create a compressor pipeline by first splitting our docs into smaller chunks, then removing redundant documents, and then filtering based on relevance to the query.\n\n from langchain.retrievers.document_compressors import DocumentCompressorPipelinefrom langchain_community.document_transformers import EmbeddingsRedundantFilterfrom langchain_text_splitters import CharacterTextSplittersplitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0, separator=\". \")redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings)relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)pipeline_compressor = DocumentCompressorPipeline( transformers=[splitter, redundant_filter, relevant_filter])\n\n**API Reference:**[DocumentCompressorPipeline](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.base.DocumentCompressorPipeline.html) | [EmbeddingsRedundantFilter](https://api.python.langchain.com/en/latest/document_transformers/langchain_community.document_transformers.embeddings_redundant_filter.EmbeddingsRedundantFilter.html) | [CharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.CharacterTextSplitter.html)\n\n compression_retriever = ContextualCompressionRetriever( base_compressor=pipeline_compressor, base_retriever=retriever)compressed_docs = compression_retriever.invoke( \"What did the president say about Ketanji Jackson Brown\")pretty_print_docs(compressed_docs)\n\n Document 1:One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson----------------------------------------------------------------------------------------------------Document 2:As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year----------------------------------------------------------------------------------------------------Document 3:A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. 
A consensus builder----------------------------------------------------------------------------------------------------Document 4:Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. We can do both\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/contextual_compression.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to split code\n\n](/v0.2/docs/how_to/code_splitter/)[\n\nNext\n\nHow to create custom callback handlers\n\n](/v0.2/docs/how_to/custom_callbacks/)\n\n* [Get started](#get-started)\n* [Using a vanilla vector store retriever](#using-a-vanilla-vector-store-retriever)\n* [Adding contextual compression with an `LLMChainExtractor`](#adding-contextual-compression-with-an-llmchainextractor)\n* [More built-in compressors: filters](#more-built-in-compressors-filters)\n * [`LLMChainFilter`](#llmchainfilter)\n * [`EmbeddingsFilter`](#embeddingsfilter)\n* [Stringing compressors and document transformers together](#stringing-compressors-and-document-transformers-together)"},"last_modified":{"kind":"null"}}},{"rowIdx":1330,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/security/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* Security\n\nOn this page\n\nSecurity\n========\n\nLangChain has a large ecosystem of integrations with various external resources like local and remote file systems, APIs and databases. These integrations allow developers to create versatile applications that combine the power of LLMs with the ability to access, interact with and manipulate external resources.\n\nBest practices[​](#best-practices \"Direct link to Best practices\")\n------------------------------------------------------------------\n\nWhen building such applications developers should remember to follow good security practices:\n\n* [**Limit Permissions**](https://en.wikipedia.org/wiki/Principle_of_least_privilege): Scope permissions specifically to the application's need. Granting broad or excessive permissions can introduce significant security vulnerabilities. To avoid such vulnerabilities, consider using read-only credentials, disallowing access to sensitive resources, using sandboxing techniques (such as running inside a container), etc. as appropriate for your application.\n* **Anticipate Potential Misuse**: Just as humans can err, so can Large Language Models (LLMs). Always assume that any system access or credentials may be used in any way allowed by the permissions they are assigned. For example, if a pair of database credentials allows deleting data, it’s safest to assume that any LLM able to use those credentials may in fact delete data.\n* [**Defense in Depth**](https://en.wikipedia.org/wiki/Defense_in_depth_\\(computing\\)): No security technique is perfect. Fine-tuning and good chain design can reduce, but not eliminate, the odds that a Large Language Model (LLM) may make a mistake. It’s best to combine multiple layered security approaches rather than relying on any single layer of defense to ensure security. 
For example: use both read-only permissions and sandboxing to ensure that LLMs are only able to access data that is explicitly meant for them to use.\n\nRisks of not doing so include, but are not limited to:\n\n* Data corruption or loss.\n* Unauthorized access to confidential information.\n* Compromised performance or availability of critical resources.\n\nExample scenarios with mitigation strategies:\n\n* A user may ask an agent with access to the file system to delete files that should not be deleted or read the content of files that contain sensitive information. To mitigate, limit the agent to only use a specific directory and only allow it to read or write files that are safe to read or write. Consider further sandboxing the agent by running it in a container.\n* A user may ask an agent with write access to an external API to write malicious data to the API, or delete data from that API. To mitigate, give the agent read-only API keys, or limit it to only use endpoints that are already resistant to such misuse.\n* A user may ask an agent with access to a database to drop a table or mutate the schema. To mitigate, scope the credentials to only the tables that the agent needs to access and consider issuing READ-ONLY credentials.\n\nIf you're building applications that access external resources like file systems, APIs or databases, consider speaking with your company's security team to determine how to best design and secure your applications.\n\nReporting a vulnerability[​](#reporting-a-vulnerability \"Direct link to Reporting a vulnerability\")\n---------------------------------------------------------------------------------------------------\n\nPlease report security vulnerabilities by email to [security@langchain.dev.](mailto:security@langchain.dev.) This will ensure the issue is promptly triaged and acted upon as needed.\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/security.md)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nChanges\n\n](/v0.2/docs/versions/v0_2/deprecations/)\n\n* [Best practices](#best-practices)\n* [Reporting a vulnerability](#reporting-a-vulnerability)"},"last_modified":{"kind":"null"}}},{"rowIdx":1331,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/HTML_header_metadata_splitter/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to split by HTML header\n\nOn this page\n\nHow to split by HTML header\n===========================\n\nDescription and motivation[​](#description-and-motivation \"Direct link to Description and motivation\")\n------------------------------------------------------------------------------------------------------\n\n[HTMLHeaderTextSplitter](https://api.python.langchain.com/en/latest/html/langchain_text_splitters.html.HTMLHeaderTextSplitter.html) is a \"structure-aware\" chunker that splits text at the HTML element level and adds metadata for each header \"relevant\" to any given chunk. It can return chunks element by element or combine elements with the same metadata, with the objectives of (a) keeping related text grouped (more or less) semantically and (b) preserving context-rich information encoded in document structures. 
It can be used with other text splitters as part of a chunking pipeline.\n\nIt is analogous to the [MarkdownHeaderTextSplitter](/v0.2/docs/how_to/markdown_header_metadata_splitter/) for markdown files.\n\nTo specify what headers to split on, specify `headers_to_split_on` when instantiating `HTMLHeaderTextSplitter` as shown below.\n\nUsage examples[​](#usage-examples \"Direct link to Usage examples\")\n------------------------------------------------------------------\n\n### 1) How to split HTML strings:[​](#1-how-to-split-html-strings \"Direct link to 1) How to split HTML strings:\")\n\n %pip install -qU langchain-text-splitters\n\n from langchain_text_splitters import HTMLHeaderTextSplitter\n html_string = \"\"\"

<!DOCTYPE html>
<html>
<body>
    <div>
        <h1>Foo</h1>
        <p>Some intro text about Foo.</p>
        <div>
            <h2>Bar main section</h2>
            <p>Some intro text about Bar.</p>
            <div>
                <h3>Bar subsection 1</h3>
                <p>Some text about the first subtopic of Bar.</p>
            </div>
            <div>
                <h3>Bar subsection 2</h3>
                <p>Some text about the second subtopic of Bar.</p>
            </div>
        </div>
        <div>
            <h2>Baz</h2>
            <p>Some text about Baz</p>
        </div>
        <br>
        <p>Some concluding text about Foo</p>
    </div>
</body>
</html>
\"\"\"headers_to_split_on = [ (\"h1\", \"Header 1\"), (\"h2\", \"Header 2\"), (\"h3\", \"Header 3\"),]html_splitter = HTMLHeaderTextSplitter(headers_to_split_on)html_header_splits = html_splitter.split_text(html_string)html_header_splits\n\n**API Reference:**[HTMLHeaderTextSplitter](https://api.python.langchain.com/en/latest/html/langchain_text_splitters.html.HTMLHeaderTextSplitter.html)\n\n [Document(page_content='Foo'), Document(page_content='Some intro text about Foo. \\nBar main section Bar subsection 1 Bar subsection 2', metadata={'Header 1': 'Foo'}), Document(page_content='Some intro text about Bar.', metadata={'Header 1': 'Foo', 'Header 2': 'Bar main section'}), Document(page_content='Some text about the first subtopic of Bar.', metadata={'Header 1': 'Foo', 'Header 2': 'Bar main section', 'Header 3': 'Bar subsection 1'}), Document(page_content='Some text about the second subtopic of Bar.', metadata={'Header 1': 'Foo', 'Header 2': 'Bar main section', 'Header 3': 'Bar subsection 2'}), Document(page_content='Baz', metadata={'Header 1': 'Foo'}), Document(page_content='Some text about Baz', metadata={'Header 1': 'Foo', 'Header 2': 'Baz'}), Document(page_content='Some concluding text about Foo', metadata={'Header 1': 'Foo'})]\n\nTo return each element together with their associated headers, specify `return_each_element=True` when instantiating `HTMLHeaderTextSplitter`:\n\n html_splitter = HTMLHeaderTextSplitter( headers_to_split_on, return_each_element=True,)html_header_splits_elements = html_splitter.split_text(html_string)\n\nComparing with the above, where elements are aggregated by their headers:\n\n for element in html_header_splits[:2]: print(element)\n\n page_content='Foo'page_content='Some intro text about Foo. \\nBar main section Bar subsection 1 Bar subsection 2' metadata={'Header 1': 'Foo'}\n\nNow each element is returned as a distinct `Document`:\n\n for element in html_header_splits_elements[:3]: print(element)\n\n page_content='Foo'page_content='Some intro text about Foo.' 
metadata={'Header 1': 'Foo'}page_content='Bar main section Bar subsection 1 Bar subsection 2' metadata={'Header 1': 'Foo'}\n\n#### 2) How to split from a URL or HTML file:[​](#2-how-to-split-from-a-url-or-html-file \"Direct link to 2) How to split from a URL or HTML file:\")\n\nTo read directly from a URL, pass the URL string into the `split_text_from_url` method.\n\nSimilarly, a local HTML file can be passed to the `split_text_from_file` method.\n\n url = \"https://plato.stanford.edu/entries/goedel/\"headers_to_split_on = [ (\"h1\", \"Header 1\"), (\"h2\", \"Header 2\"), (\"h3\", \"Header 3\"), (\"h4\", \"Header 4\"),]html_splitter = HTMLHeaderTextSplitter(headers_to_split_on)# for local file use html_splitter.split_text_from_file()html_header_splits = html_splitter.split_text_from_url(url)\n\n### 2) How to constrain chunk sizes:[​](#2-how-to-constrain-chunk-sizes \"Direct link to 2) How to constrain chunk sizes:\")\n\n`HTMLHeaderTextSplitter`, which splits based on HTML headers, can be composed with another splitter which constrains splits based on character lengths, such as `RecursiveCharacterTextSplitter`.\n\nThis can be done using the `.split_documents` method of the second splitter:\n\n from langchain_text_splitters import RecursiveCharacterTextSplitterchunk_size = 500chunk_overlap = 30text_splitter = RecursiveCharacterTextSplitter( chunk_size=chunk_size, chunk_overlap=chunk_overlap)# Splitsplits = text_splitter.split_documents(html_header_splits)splits[80:85]\n\n**API Reference:**[RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html)\n\n [Document(page_content='We see that Gödel first tried to reduce the consistency problem for analysis to that of arithmetic. This seemed to require a truth definition for arithmetic, which in turn led to paradoxes, such as the Liar paradox (“This sentence is false”) and Berry’s paradox (“The least number not defined by an expression consisting of just fourteen English words”). Gödel then noticed that such paradoxes would not necessarily arise if truth were replaced by provability. But this means that arithmetic truth', metadata={'Header 1': 'Kurt Gödel', 'Header 2': '2. Gödel’s Mathematical Work', 'Header 3': '2.2 The Incompleteness Theorems', 'Header 4': '2.2.1 The First Incompleteness Theorem'}), Document(page_content='means that arithmetic truth and arithmetic provability are not co-extensive — whence the First Incompleteness Theorem.', metadata={'Header 1': 'Kurt Gödel', 'Header 2': '2. Gödel’s Mathematical Work', 'Header 3': '2.2 The Incompleteness Theorems', 'Header 4': '2.2.1 The First Incompleteness Theorem'}), Document(page_content='This account of Gödel’s discovery was told to Hao Wang very much after the fact; but in Gödel’s contemporary correspondence with Bernays and Zermelo, essentially the same description of his path to the theorems is given. (See Gödel 2003a and Gödel 2003b respectively.) From those accounts we see that the undefinability of truth in arithmetic, a result credited to Tarski, was likely obtained in some form by Gödel by 1931. But he neither publicized nor published the result; the biases logicians', metadata={'Header 1': 'Kurt Gödel', 'Header 2': '2. 
Gödel’s Mathematical Work', 'Header 3': '2.2 The Incompleteness Theorems', 'Header 4': '2.2.1 The First Incompleteness Theorem'}), Document(page_content='result; the biases logicians had expressed at the time concerning the notion of truth, biases which came vehemently to the fore when Tarski announced his results on the undefinability of truth in formal systems 1935, may have served as a deterrent to Gödel’s publication of that theorem.', metadata={'Header 1': 'Kurt Gödel', 'Header 2': '2. Gödel’s Mathematical Work', 'Header 3': '2.2 The Incompleteness Theorems', 'Header 4': '2.2.1 The First Incompleteness Theorem'}), Document(page_content='We now describe the proof of the two theorems, formulating Gödel’s results in Peano arithmetic. Gödel himself used a system related to that defined in Principia Mathematica, but containing Peano arithmetic. In our presentation of the First and Second Incompleteness Theorems we refer to Peano arithmetic as P, following Gödel’s notation.', metadata={'Header 1': 'Kurt Gödel', 'Header 2': '2. Gödel’s Mathematical Work', 'Header 3': '2.2 The Incompleteness Theorems', 'Header 4': '2.2.2 The proof of the First Incompleteness Theorem'})]\n\nLimitations[​](#limitations \"Direct link to Limitations\")\n---------------------------------------------------------\n\nThere can be quite a bit of structural variation from one HTML document to another, and while `HTMLHeaderTextSplitter` will attempt to attach all \"relevant\" headers to any given chunk, it can sometimes miss certain headers. For example, the algorithm assumes an informational hierarchy in which headers are always at nodes \"above\" associated text, i.e. prior siblings, ancestors, and combinations thereof. In the following news article (as of the writing of this document), the document is structured such that the text of the top-level headline, while tagged \"h1\", is in a _distinct_ subtree from the text elements that we'd expect it to be _\"above\"_—so we can observe that the \"h1\" element and its associated text do not show up in the chunk metadata (but, where applicable, we do see \"h2\" and its associated text):\n\n url = \"https://www.cnn.com/2023/09/25/weather/el-nino-winter-us-climate/index.html\"headers_to_split_on = [ (\"h1\", \"Header 1\"), (\"h2\", \"Header 2\"),]html_splitter = HTMLHeaderTextSplitter(headers_to_split_on)html_header_splits = html_splitter.split_text_from_url(url)print(html_header_splits[1].page_content[:500])\n\n No two El Niño winters are the same, but many have temperature and precipitation trends in common. Average conditions during an El Niño winter across the continental US. One of the major reasons is the position of the jet stream, which often shifts south during an El Niño winter. This shift typically brings wetter and cooler weather to the South while the North becomes drier and warmer, according to NOAA. 
Because the jet stream is essentially a river of air that storms flow through, they c\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/HTML_header_metadata_splitter.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nCustom Document Loader\n\n](/v0.2/docs/how_to/document_loader_custom/)[\n\nNext\n\nHow to split by HTML sections\n\n](/v0.2/docs/how_to/HTML_section_aware_splitter/)\n\n* [Description and motivation](#description-and-motivation)\n* [Usage examples](#usage-examples)\n * [1) How to split HTML strings:](#1-how-to-split-html-strings)\n * [2) How to constrain chunk sizes:](#2-how-to-constrain-chunk-sizes)\n* [Limitations](#limitations)"},"last_modified":{"kind":"null"}}},{"rowIdx":1332,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/HTML_section_aware_splitter/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to split by HTML sections\n\nOn this page\n\nHow to split by HTML sections\n=============================\n\nDescription and motivation[​](#description-and-motivation \"Direct link to Description and motivation\")\n------------------------------------------------------------------------------------------------------\n\nSimilar in concept to the [HTMLHeaderTextSplitter](/v0.2/docs/how_to/HTML_header_metadata_splitter/), the `HTMLSectionSplitter` is a \"structure-aware\" chunker that splits text at the element level and adds metadata for each header \"relevant\" to any given chunk.\n\nIt can return chunks element by element or combine elements with the same metadata, with the objectives of (a) keeping related text grouped (more or less) semantically and (b) preserving context-rich information encoded in document structures.\n\nUse `xslt_path` to provide an absolute path to transform the HTML so that it can detect sections based on provided tags. The default is to use the `converting_to_header.xslt` file in the `data_connection/document_transformers` directory. This is for converting the html to a format/layout that is easier to detect sections. For example, `span` based on their font size can be converted to header tags to be detected as a section.\n\nUsage examples[​](#usage-examples \"Direct link to Usage examples\")\n------------------------------------------------------------------\n\n### 1) How to split HTML strings:[​](#1-how-to-split-html-strings \"Direct link to 1) How to split HTML strings:\")\n\n from langchain_text_splitters import HTMLSectionSplitterhtml_string = \"\"\"

<!DOCTYPE html>
<html>
<body>
    <div>
        <h1>Foo</h1>
        <p>Some intro text about Foo.</p>
        <div>
            <h2>Bar main section</h2>
            <p>Some intro text about Bar.</p>
            <div>
                <h3>Bar subsection 1</h3>
                <p>Some text about the first subtopic of Bar.</p>
            </div>
            <div>
                <h3>Bar subsection 2</h3>
                <p>Some text about the second subtopic of Bar.</p>
            </div>
        </div>
        <div>
            <h2>Baz</h2>
            <p>Some text about Baz</p>
        </div>
        <br>
        <p>Some concluding text about Foo</p>
    </div>
</body>
</html>
\"\"\"headers_to_split_on = [(\"h1\", \"Header 1\"), (\"h2\", \"Header 2\")]html_splitter = HTMLSectionSplitter(headers_to_split_on)html_header_splits = html_splitter.split_text(html_string)html_header_splits\n\n**API Reference:**[HTMLSectionSplitter](https://api.python.langchain.com/en/latest/html/langchain_text_splitters.html.HTMLSectionSplitter.html)\n\n [Document(page_content='Foo \\n Some intro text about Foo.', metadata={'Header 1': 'Foo'}), Document(page_content='Bar main section \\n Some intro text about Bar. \\n Bar subsection 1 \\n Some text about the first subtopic of Bar. \\n Bar subsection 2 \\n Some text about the second subtopic of Bar.', metadata={'Header 2': 'Bar main section'}), Document(page_content='Baz \\n Some text about Baz \\n \\n \\n Some concluding text about Foo', metadata={'Header 2': 'Baz'})]\n\n### 2) How to constrain chunk sizes:[​](#2-how-to-constrain-chunk-sizes \"Direct link to 2) How to constrain chunk sizes:\")\n\n`HTMLSectionSplitter` can be used with other text splitters as part of a chunking pipeline. Internally, it uses the `RecursiveCharacterTextSplitter` when the section size is larger than the chunk size. It also considers the font size of the text to determine whether it is a section or not based on the determined font size threshold.\n\n from langchain_text_splitters import RecursiveCharacterTextSplitterhtml_string = \"\"\"

<!DOCTYPE html>
<html>
<body>
    <div>
        <h1>Foo</h1>
        <p>Some intro text about Foo.</p>
        <div>
            <h2>Bar main section</h2>
            <p>Some intro text about Bar.</p>
            <div>
                <h3>Bar subsection 1</h3>
                <p>Some text about the first subtopic of Bar.</p>
            </div>
            <div>
                <h3>Bar subsection 2</h3>
                <p>Some text about the second subtopic of Bar.</p>
            </div>
        </div>
        <div>
            <h2>Baz</h2>
            <p>Some text about Baz</p>
        </div>
        <br>
        <p>Some concluding text about Foo</p>
    </div>
</body>
</html>
\"\"\"headers_to_split_on = [ (\"h1\", \"Header 1\"), (\"h2\", \"Header 2\"), (\"h3\", \"Header 3\"), (\"h4\", \"Header 4\"),]html_splitter = HTMLSectionSplitter(headers_to_split_on)html_header_splits = html_splitter.split_text(html_string)chunk_size = 500chunk_overlap = 30text_splitter = RecursiveCharacterTextSplitter( chunk_size=chunk_size, chunk_overlap=chunk_overlap)# Splitsplits = text_splitter.split_documents(html_header_splits)splits\n\n**API Reference:**[RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html)\n\n [Document(page_content='Foo \\n Some intro text about Foo.', metadata={'Header 1': 'Foo'}), Document(page_content='Bar main section \\n Some intro text about Bar.', metadata={'Header 2': 'Bar main section'}), Document(page_content='Bar subsection 1 \\n Some text about the first subtopic of Bar.', metadata={'Header 3': 'Bar subsection 1'}), Document(page_content='Bar subsection 2 \\n Some text about the second subtopic of Bar.', metadata={'Header 3': 'Bar subsection 2'}), Document(page_content='Baz \\n Some text about Baz \\n \\n \\n Some concluding text about Foo', metadata={'Header 2': 'Baz'})]\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/HTML_section_aware_splitter.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to split by HTML header\n\n](/v0.2/docs/how_to/HTML_header_metadata_splitter/)[\n\nNext\n\nHow to use the MultiQueryRetriever\n\n](/v0.2/docs/how_to/MultiQueryRetriever/)\n\n* [Description and motivation](#description-and-motivation)\n* [Usage examples](#usage-examples)\n * [1) How to split HTML strings:](#1-how-to-split-html-strings)\n * [2) How to constrain chunk sizes:](#2-how-to-constrain-chunk-sizes)"},"last_modified":{"kind":"null"}}},{"rowIdx":1333,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/custom_chat_model/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to create a custom chat model class\n\nOn this page\n\nHow to create a custom chat model class\n=======================================\n\nPrerequisites\n\nThis guide assumes familiarity with the following concepts:\n\n* [Chat models](/v0.2/docs/concepts/#chat-models)\n\nIn this guide, we'll learn how to create a custom chat model using LangChain abstractions.\n\nWrapping your LLM with the standard [`BaseChatModel`](https://api.python.langchain.com/en/latest/language_models/langchain_core.language_models.chat_models.BaseChatModel.html) interface allow you to use your LLM in existing LangChain programs with minimal code modifications!\n\nAs an bonus, your LLM will automatically become a LangChain `Runnable` and will benefit from some optimizations out of the box (e.g., batch via a threadpool), async support, the `astream_events` API, etc.\n\nInputs and outputs[​](#inputs-and-outputs \"Direct link to Inputs and outputs\")\n------------------------------------------------------------------------------\n\nFirst, we need to talk about **messages**, which are the inputs and outputs of chat models.\n\n### Messages[​](#messages \"Direct link to Messages\")\n\nChat models take 
messages as inputs and return a message as output.\n\nLangChain has a few [built-in message types](/v0.2/docs/concepts/#message-types):\n\nMessage Type\n\nDescription\n\n`SystemMessage`\n\nUsed for priming AI behavior, usually passed in as the first of a sequence of input messages.\n\n`HumanMessage`\n\nRepresents a message from a person interacting with the chat model.\n\n`AIMessage`\n\nRepresents a message from the chat model. This can be either text or a request to invoke a tool.\n\n`FunctionMessage` / `ToolMessage`\n\nMessage for passing the results of tool invocation back to the model.\n\n`AIMessageChunk` / `HumanMessageChunk` / ...\n\nChunk variant of each type of message.\n\n::: {.callout-note} `ToolMessage` and `FunctionMessage` closely follow OpenAI's `function` and `tool` roles.\n\nThis is a rapidly developing field and as more models add function calling capabilities. Expect that there will be additions to this schema. :::\n\n from langchain_core.messages import ( AIMessage, BaseMessage, FunctionMessage, HumanMessage, SystemMessage, ToolMessage,)\n\n**API Reference:**[AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) | [BaseMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.base.BaseMessage.html) | [FunctionMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.function.FunctionMessage.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [SystemMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html) | [ToolMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessage.html)\n\n### Streaming Variant[​](#streaming-variant \"Direct link to Streaming Variant\")\n\nAll the chat messages have a streaming variant that contains `Chunk` in the name.\n\n from langchain_core.messages import ( AIMessageChunk, FunctionMessageChunk, HumanMessageChunk, SystemMessageChunk, ToolMessageChunk,)\n\n**API Reference:**[AIMessageChunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) | [FunctionMessageChunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.function.FunctionMessageChunk.html) | [HumanMessageChunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessageChunk.html) | [SystemMessageChunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessageChunk.html) | [ToolMessageChunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessageChunk.html)\n\nThese chunks are used when streaming output from chat models, and they all define an additive property!\n\n AIMessageChunk(content=\"Hello\") + AIMessageChunk(content=\" World!\")\n\n AIMessageChunk(content='Hello World!')\n\nBase Chat Model[​](#base-chat-model \"Direct link to Base Chat Model\")\n---------------------------------------------------------------------\n\nLet's implement a chat model that echoes back the first `n` characetrs of the last message in the prompt!\n\nTo do so, we will inherit from `BaseChatModel` and we'll need to implement the following:\n\nMethod/Property\n\nDescription\n\nRequired/Optional\n\n`_generate`\n\nUse to generate a chat result from a prompt\n\nRequired\n\n`_llm_type` (property)\n\nUsed to uniquely identify the type of the model. 
Used for logging.\n\nRequired\n\n`_identifying_params` (property)\n\nRepresent model parameterization for tracing purposes.\n\nOptional\n\n`_stream`\n\nUse to implement streaming.\n\nOptional\n\n`_agenerate`\n\nUse to implement a native async method.\n\nOptional\n\n`_astream`\n\nUse to implement async version of `_stream`.\n\nOptional\n\ntip\n\nThe `_astream` implementation uses `run_in_executor` to launch the sync `_stream` in a separate thread if `_stream` is implemented, otherwise it fallsback to use `_agenerate`.\n\nYou can use this trick if you want to reuse the `_stream` implementation, but if you're able to implement code that's natively async that's a better solution since that code will run with less overhead.\n\n### Implementation[​](#implementation \"Direct link to Implementation\")\n\n from typing import Any, AsyncIterator, Dict, Iterator, List, Optionalfrom langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun,)from langchain_core.language_models import BaseChatModel, SimpleChatModelfrom langchain_core.messages import AIMessageChunk, BaseMessage, HumanMessagefrom langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResultfrom langchain_core.runnables import run_in_executorclass CustomChatModelAdvanced(BaseChatModel): \"\"\"A custom chat model that echoes the first `n` characters of the input. When contributing an implementation to LangChain, carefully document the model including the initialization parameters, include an example of how to initialize the model and include any relevant links to the underlying models documentation or API. Example: .. code-block:: python model = CustomChatModel(n=2) result = model.invoke([HumanMessage(content=\"hello\")]) result = model.batch([[HumanMessage(content=\"hello\")], [HumanMessage(content=\"world\")]]) \"\"\" model_name: str \"\"\"The name of the model\"\"\" n: int \"\"\"The number of characters from the last message of the prompt to be echoed.\"\"\" def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: \"\"\"Override the _generate method to implement the chat model logic. This can be a call to an API, a call to a local model, or any other implementation that generates a response to the input prompt. Args: messages: the prompt composed of a list of messages. stop: a list of strings on which the model should stop generating. If generation stops due to a stop token, the stop token itself SHOULD BE INCLUDED as part of the output. This is not enforced across models right now, but it's a good practice to follow since it makes it much easier to parse the output of the model downstream and understand why generation stopped. run_manager: A run manager with callbacks for the LLM. \"\"\" # Replace this with actual logic to generate a response from a list # of messages. last_message = messages[-1] tokens = last_message.content[: self.n] message = AIMessage( content=tokens, additional_kwargs={}, # Used to add additional payload (e.g., function calling request) response_metadata={ # Use for response metadata \"time_in_seconds\": 3, }, ) ## generation = ChatGeneration(message=message) return ChatResult(generations=[generation]) def _stream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: \"\"\"Stream the output of the model. 
This method should be implemented if the model can generate output in a streaming fashion. If the model does not support streaming, do not implement it. In that case streaming requests will be automatically handled by the _generate method. Args: messages: the prompt composed of a list of messages. stop: a list of strings on which the model should stop generating. If generation stops due to a stop token, the stop token itself SHOULD BE INCLUDED as part of the output. This is not enforced across models right now, but it's a good practice to follow since it makes it much easier to parse the output of the model downstream and understand why generation stopped. run_manager: A run manager with callbacks for the LLM. \"\"\" last_message = messages[-1] tokens = last_message.content[: self.n] for token in tokens: chunk = ChatGenerationChunk(message=AIMessageChunk(content=token)) if run_manager: # This is optional in newer versions of LangChain # The on_llm_new_token will be called automatically run_manager.on_llm_new_token(token, chunk=chunk) yield chunk # Let's add some other information (e.g., response metadata) chunk = ChatGenerationChunk( message=AIMessageChunk(content=\"\", response_metadata={\"time_in_sec\": 3}) ) if run_manager: # This is optional in newer versions of LangChain # The on_llm_new_token will be called automatically run_manager.on_llm_new_token(token, chunk=chunk) yield chunk @property def _llm_type(self) -> str: \"\"\"Get the type of language model used by this chat model.\"\"\" return \"echoing-chat-model-advanced\" @property def _identifying_params(self) -> Dict[str, Any]: \"\"\"Return a dictionary of identifying parameters. This information is used by the LangChain callback system, which is used for tracing purposes make it possible to monitor LLMs. \"\"\" return { # The model name allows users to specify custom token counting # rules in LLM monitoring applications (e.g., in LangSmith users # can provide per token pricing for their model and monitor # costs for the given LLM.) 
\"model_name\": self.model_name, }\n\n**API Reference:**[AsyncCallbackManagerForLLMRun](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.AsyncCallbackManagerForLLMRun.html) | [CallbackManagerForLLMRun](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.CallbackManagerForLLMRun.html) | [BaseChatModel](https://api.python.langchain.com/en/latest/language_models/langchain_core.language_models.chat_models.BaseChatModel.html) | [SimpleChatModel](https://api.python.langchain.com/en/latest/language_models/langchain_core.language_models.chat_models.SimpleChatModel.html) | [AIMessageChunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) | [BaseMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.base.BaseMessage.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [ChatGeneration](https://api.python.langchain.com/en/latest/outputs/langchain_core.outputs.chat_generation.ChatGeneration.html) | [ChatGenerationChunk](https://api.python.langchain.com/en/latest/outputs/langchain_core.outputs.chat_generation.ChatGenerationChunk.html) | [ChatResult](https://api.python.langchain.com/en/latest/outputs/langchain_core.outputs.chat_result.ChatResult.html) | [run\\_in\\_executor](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.config.run_in_executor.html)\n\n### Let's test it 🧪[​](#lets-test-it- \"Direct link to Let's test it 🧪\")\n\nThe chat model will implement the standard `Runnable` interface of LangChain which many of the LangChain abstractions support!\n\n model = CustomChatModelAdvanced(n=3, model_name=\"my_custom_model\")model.invoke( [ HumanMessage(content=\"hello!\"), AIMessage(content=\"Hi there human!\"), HumanMessage(content=\"Meow!\"), ])\n\n AIMessage(content='Meo', response_metadata={'time_in_seconds': 3}, id='run-ddb42bd6-4fdd-4bd2-8be5-e11b67d3ac29-0')\n\n model.invoke(\"hello\")\n\n AIMessage(content='hel', response_metadata={'time_in_seconds': 3}, id='run-4d3cc912-44aa-454b-977b-ca02be06c12e-0')\n\n model.batch([\"hello\", \"goodbye\"])\n\n [AIMessage(content='hel', response_metadata={'time_in_seconds': 3}, id='run-9620e228-1912-4582-8aa1-176813afec49-0'), AIMessage(content='goo', response_metadata={'time_in_seconds': 3}, id='run-1ce8cdf8-6f75-448e-82f7-1bb4a121df93-0')]\n\n for chunk in model.stream(\"cat\"): print(chunk.content, end=\"|\")\n\n c|a|t||\n\nPlease see the implementation of `_astream` in the model! 
If you do not implement it, then no output will stream.!\n\n async for chunk in model.astream(\"cat\"): print(chunk.content, end=\"|\")\n\n c|a|t||\n\nLet's try to use the astream events API which will also help double check that all the callbacks were implemented!\n\n async for event in model.astream_events(\"cat\", version=\"v1\"): print(event)\n\n {'event': 'on_chat_model_start', 'run_id': '125a2a16-b9cd-40de-aa08-8aa9180b07d0', 'name': 'CustomChatModelAdvanced', 'tags': [], 'metadata': {}, 'data': {'input': 'cat'}}{'event': 'on_chat_model_stream', 'run_id': '125a2a16-b9cd-40de-aa08-8aa9180b07d0', 'tags': [], 'metadata': {}, 'name': 'CustomChatModelAdvanced', 'data': {'chunk': AIMessageChunk(content='c', id='run-125a2a16-b9cd-40de-aa08-8aa9180b07d0')}}{'event': 'on_chat_model_stream', 'run_id': '125a2a16-b9cd-40de-aa08-8aa9180b07d0', 'tags': [], 'metadata': {}, 'name': 'CustomChatModelAdvanced', 'data': {'chunk': AIMessageChunk(content='a', id='run-125a2a16-b9cd-40de-aa08-8aa9180b07d0')}}{'event': 'on_chat_model_stream', 'run_id': '125a2a16-b9cd-40de-aa08-8aa9180b07d0', 'tags': [], 'metadata': {}, 'name': 'CustomChatModelAdvanced', 'data': {'chunk': AIMessageChunk(content='t', id='run-125a2a16-b9cd-40de-aa08-8aa9180b07d0')}}{'event': 'on_chat_model_stream', 'run_id': '125a2a16-b9cd-40de-aa08-8aa9180b07d0', 'tags': [], 'metadata': {}, 'name': 'CustomChatModelAdvanced', 'data': {'chunk': AIMessageChunk(content='', response_metadata={'time_in_sec': 3}, id='run-125a2a16-b9cd-40de-aa08-8aa9180b07d0')}}{'event': 'on_chat_model_end', 'name': 'CustomChatModelAdvanced', 'run_id': '125a2a16-b9cd-40de-aa08-8aa9180b07d0', 'tags': [], 'metadata': {}, 'data': {'output': AIMessageChunk(content='cat', response_metadata={'time_in_sec': 3}, id='run-125a2a16-b9cd-40de-aa08-8aa9180b07d0')}}``````output/home/eugene/src/langchain/libs/core/langchain_core/_api/beta_decorator.py:87: LangChainBetaWarning: This API is in beta and may change in the future. warn_beta(\n\nContributing[​](#contributing \"Direct link to Contributing\")\n------------------------------------------------------------\n\nWe appreciate all chat model integration contributions.\n\nHere's a checklist to help make sure your contribution gets added to LangChain:\n\nDocumentation:\n\n* The model contains doc-strings for all initialization arguments, as these will be surfaced in the [APIReference](https://api.python.langchain.com/en/stable/langchain_api_reference.html).\n* The class doc-string for the model contains a link to the model API if the model is powered by a service.\n\nTests:\n\n* Add unit or integration tests to the overridden methods. Verify that `invoke`, `ainvoke`, `batch`, `stream` work if you've over-ridden the corresponding code.\n\nStreaming (if you're implementing it):\n\n* Implement the \\_stream method to get streaming working\n\nStop Token Behavior:\n\n* Stop token should be respected\n* Stop token should be INCLUDED as part of the response\n\nSecret API Keys:\n\n* If your model connects to an API it will likely accept API keys as part of its initialization. 
Use Pydantic's `SecretStr` type for secrets, so they don't get accidentally printed out when folks print the model.\n\nIdentifying Params:\n\n* Include a `model_name` in identifying params\n\nOptimizations:\n\nConsider providing native async support to reduce the overhead from the model!\n\n* Provided a native async of `_agenerate` (used by `ainvoke`)\n* Provided a native async of `_astream` (used by `astream`)\n\nNext steps[​](#next-steps \"Direct link to Next steps\")\n------------------------------------------------------\n\nYou've now learned how to create your own custom chat models.\n\nNext, check out the other how-to guides chat models in this section, like [how to get a model to return structured output](/v0.2/docs/how_to/structured_output/) or [how to track chat model token usage](/v0.2/docs/how_to/chat_token_usage_tracking/).\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/custom_chat_model.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to create custom callback handlers\n\n](/v0.2/docs/how_to/custom_callbacks/)[\n\nNext\n\nHow to create a custom LLM class\n\n](/v0.2/docs/how_to/custom_llm/)\n\n* [Inputs and outputs](#inputs-and-outputs)\n * [Messages](#messages)\n * [Streaming Variant](#streaming-variant)\n* [Base Chat Model](#base-chat-model)\n * [Implementation](#implementation)\n * [Let's test it 🧪](#lets-test-it-)\n* [Contributing](#contributing)\n* [Next steps](#next-steps)"},"last_modified":{"kind":"null"}}},{"rowIdx":1334,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/custom_callbacks/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to create custom callback handlers\n\nOn this page\n\nHow to create custom callback handlers\n======================================\n\nPrerequisites\n\nThis guide assumes familiarity with the following concepts:\n\n* [Callbacks](/v0.2/docs/concepts/#callbacks)\n\nLangChain has some built-in callback handlers, but you will often want to create your own handlers with custom logic.\n\nTo create a custom callback handler, we need to determine the [event(s)](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) we want our callback handler to handle as well as what we want our callback handler to do when the event is triggered. Then all we need to do is attach the callback handler to the object, for example via [the constructor](/v0.2/docs/how_to/callbacks_constructor/) or [at runtime](/v0.2/docs/how_to/callbacks_runtime/).\n\nIn the example below, we'll implement streaming with a custom handler.\n\nIn our custom callback handler `MyCustomHandler`, we implement the `on_llm_new_token` handler to print the token we have just received. 
We then attach our custom handler to the model object as a constructor callback.\n\n from langchain_anthropic import ChatAnthropicfrom langchain_core.callbacks import BaseCallbackHandlerfrom langchain_core.prompts import ChatPromptTemplateclass MyCustomHandler(BaseCallbackHandler): def on_llm_new_token(self, token: str, **kwargs) -> None: print(f\"My custom handler, token: {token}\")prompt = ChatPromptTemplate.from_messages([\"Tell me a joke about {animal}\"])# To enable streaming, we pass in `streaming=True` to the ChatModel constructor# Additionally, we pass in our custom handler as a list to the callbacks parametermodel = ChatAnthropic( model=\"claude-3-sonnet-20240229\", streaming=True, callbacks=[MyCustomHandler()])chain = prompt | modelresponse = chain.invoke({\"animal\": \"bears\"})\n\n**API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) | [BaseCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html)\n\n My custom handler, token: HereMy custom handler, token: 'sMy custom handler, token: aMy custom handler, token: bearMy custom handler, token: jokeMy custom handler, token: forMy custom handler, token: youMy custom handler, token: :My custom handler, token: WhyMy custom handler, token: diMy custom handler, token: d theMy custom handler, token: bearMy custom handler, token: dissolMy custom handler, token: veMy custom handler, token: inMy custom handler, token: waterMy custom handler, token: ?My custom handler, token: BecauseMy custom handler, token: itMy custom handler, token: wasMy custom handler, token: aMy custom handler, token: polarMy custom handler, token: bearMy custom handler, token: !\n\nYou can see [this reference page](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) for a list of events you can handle. 
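For instance, a handler can override several of these events at once. The snippet below is an illustrative sketch only (the `LoggingHandler` name and the printed messages are placeholders, not part of the guide above); it relies on the same `BaseCallbackHandler` base class and the standard event signatures:

    from typing import Any, Dict

    from langchain_core.callbacks import BaseCallbackHandler
    from langchain_core.outputs import LLMResult


    class LoggingHandler(BaseCallbackHandler):
        """Illustrative handler that prints a line for a few lifecycle events."""

        def on_chain_start(
            self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
        ) -> None:
            # Fired when any chain (including LCEL sequences) starts running.
            print(f"Chain started with inputs: {inputs}")

        def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
            # Fired when the chain finishes and produces its outputs.
            print(f"Chain ended with outputs: {outputs}")

        def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
            # Fired once the underlying model call completes.
            print("LLM call finished")

A handler like this can also be attached at runtime instead of in the constructor, e.g. `chain.invoke({"animal": "bears"}, config={"callbacks": [LoggingHandler()]})`.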
Note that the `handle_chain_*` events run for most LCEL runnables.\n\nNext steps[​](#next-steps \"Direct link to Next steps\")\n------------------------------------------------------\n\nYou've now learned how to create your own custom callback handlers.\n\nNext, check out the other how-to guides in this section, such as [how to attach callbacks to a runnable](/v0.2/docs/how_to/callbacks_attach/).\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/custom_callbacks.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to do retrieval with contextual compression\n\n](/v0.2/docs/how_to/contextual_compression/)[\n\nNext\n\nHow to create a custom chat model class\n\n](/v0.2/docs/how_to/custom_chat_model/)\n\n* [Next steps](#next-steps)"},"last_modified":{"kind":"null"}}},{"rowIdx":1335,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/custom_llm/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to create a custom LLM class\n\nOn this page\n\nHow to create a custom LLM class\n================================\n\nThis notebook goes over how to create a custom LLM wrapper, in case you want to use your own LLM or a different wrapper than one that is supported in LangChain.\n\nWrapping your LLM with the standard `LLM` interface allow you to use your LLM in existing LangChain programs with minimal code modifications!\n\nAs an bonus, your LLM will automatically become a LangChain `Runnable` and will benefit from some optimizations out of the box, async support, the `astream_events` API, etc.\n\nImplementation[​](#implementation \"Direct link to Implementation\")\n------------------------------------------------------------------\n\nThere are only two required things that a custom LLM needs to implement:\n\nMethod\n\nDescription\n\n`_call`\n\nTakes in a string and some optional stop words, and returns a string. Used by `invoke`.\n\n`_llm_type`\n\nA property that returns a string, used for logging purposes only.\n\nOptional implementations:\n\nMethod\n\nDescription\n\n`_identifying_params`\n\nUsed to help with identifying the model and printing the LLM; should return a dictionary. This is a **@property**.\n\n`_acall`\n\nProvides an async native implementation of `_call`, used by `ainvoke`.\n\n`_stream`\n\nMethod to stream the output token by token.\n\n`_astream`\n\nProvides an async native implementation of `_stream`; in newer LangChain versions, defaults to `_stream`.\n\nLet's implement a simple custom LLM that just returns the first n characters of the input.\n\n from typing import Any, Dict, Iterator, List, Mapping, Optionalfrom langchain_core.callbacks.manager import CallbackManagerForLLMRunfrom langchain_core.language_models.llms import LLMfrom langchain_core.outputs import GenerationChunkclass CustomLLM(LLM): \"\"\"A custom chat model that echoes the first `n` characters of the input. When contributing an implementation to LangChain, carefully document the model including the initialization parameters, include an example of how to initialize the model and include any relevant links to the underlying models documentation or API. Example: .. 
code-block:: python model = CustomChatModel(n=2) result = model.invoke([HumanMessage(content=\"hello\")]) result = model.batch([[HumanMessage(content=\"hello\")], [HumanMessage(content=\"world\")]]) \"\"\" n: int \"\"\"The number of characters from the last message of the prompt to be echoed.\"\"\" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: \"\"\"Run the LLM on the given input. Override this method to implement the LLM logic. Args: prompt: The prompt to generate from. stop: Stop words to use when generating. Model output is cut off at the first occurrence of any of the stop substrings. If stop tokens are not supported consider raising NotImplementedError. run_manager: Callback manager for the run. **kwargs: Arbitrary additional keyword arguments. These are usually passed to the model provider API call. Returns: The model output as a string. Actual completions SHOULD NOT include the prompt. \"\"\" if stop is not None: raise ValueError(\"stop kwargs are not permitted.\") return prompt[: self.n] def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: \"\"\"Stream the LLM on the given prompt. This method should be overridden by subclasses that support streaming. If not implemented, the default behavior of calls to stream will be to fallback to the non-streaming version of the model and return the output as a single chunk. Args: prompt: The prompt to generate from. stop: Stop words to use when generating. Model output is cut off at the first occurrence of any of these substrings. run_manager: Callback manager for the run. **kwargs: Arbitrary additional keyword arguments. These are usually passed to the model provider API call. Returns: An iterator of GenerationChunks. \"\"\" for char in prompt[: self.n]: chunk = GenerationChunk(text=char) if run_manager: run_manager.on_llm_new_token(chunk.text, chunk=chunk) yield chunk @property def _identifying_params(self) -> Dict[str, Any]: \"\"\"Return a dictionary of identifying parameters.\"\"\" return { # The model name allows users to specify custom token counting # rules in LLM monitoring applications (e.g., in LangSmith users # can provide per token pricing for their model and monitor # costs for the given LLM.) \"model_name\": \"CustomChatModel\", } @property def _llm_type(self) -> str: \"\"\"Get the type of language model used by this chat model. 
Used for logging purposes only.\"\"\" return \"custom\"\n\n**API Reference:**[CallbackManagerForLLMRun](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.CallbackManagerForLLMRun.html) | [LLM](https://api.python.langchain.com/en/latest/language_models/langchain_core.language_models.llms.LLM.html) | [GenerationChunk](https://api.python.langchain.com/en/latest/outputs/langchain_core.outputs.generation.GenerationChunk.html)\n\n### Let's test it 🧪[​](#lets-test-it- \"Direct link to Let's test it 🧪\")\n\nThis LLM will implement the standard `Runnable` interface of LangChain which many of the LangChain abstractions support!\n\n llm = CustomLLM(n=5)print(llm)\n\n \u001b[1mCustomLLM\u001b[0mParams: {'model_name': 'CustomChatModel'}\n\n llm.invoke(\"This is a foobar thing\")\n\n 'This '\n\n await llm.ainvoke(\"world\")\n\n 'world'\n\n llm.batch([\"woof woof woof\", \"meow meow meow\"])\n\n ['woof ', 'meow ']\n\n await llm.abatch([\"woof woof woof\", \"meow meow meow\"])\n\n ['woof ', 'meow ']\n\n async for token in llm.astream(\"hello\"): print(token, end=\"|\", flush=True)\n\n h|e|l|l|o|\n\nLet's confirm that in integrates nicely with other `LangChain` APIs.\n\n from langchain_core.prompts import ChatPromptTemplate\n\n**API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html)\n\n prompt = ChatPromptTemplate.from_messages( [(\"system\", \"you are a bot\"), (\"human\", \"{input}\")])\n\n llm = CustomLLM(n=7)chain = prompt | llm\n\n idx = 0async for event in chain.astream_events({\"input\": \"hello there!\"}, version=\"v1\"): print(event) idx += 1 if idx > 7: # Truncate break\n\n {'event': 'on_chain_start', 'run_id': '05f24b4f-7ea3-4fb6-8417-3aa21633462f', 'name': 'RunnableSequence', 'tags': [], 'metadata': {}, 'data': {'input': {'input': 'hello there!'}}}{'event': 'on_prompt_start', 'name': 'ChatPromptTemplate', 'run_id': '7e996251-a926-4344-809e-c425a9846d21', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'input': {'input': 'hello there!'}}}{'event': 'on_prompt_end', 'name': 'ChatPromptTemplate', 'run_id': '7e996251-a926-4344-809e-c425a9846d21', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'input': {'input': 'hello there!'}, 'output': ChatPromptValue(messages=[SystemMessage(content='you are a bot'), HumanMessage(content='hello there!')])}}{'event': 'on_llm_start', 'name': 'CustomLLM', 'run_id': 'a8766beb-10f4-41de-8750-3ea7cf0ca7e2', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'input': {'prompts': ['System: you are a bot\\nHuman: hello there!']}}}{'event': 'on_llm_stream', 'name': 'CustomLLM', 'run_id': 'a8766beb-10f4-41de-8750-3ea7cf0ca7e2', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': 'S'}}{'event': 'on_chain_stream', 'run_id': '05f24b4f-7ea3-4fb6-8417-3aa21633462f', 'tags': [], 'metadata': {}, 'name': 'RunnableSequence', 'data': {'chunk': 'S'}}{'event': 'on_llm_stream', 'name': 'CustomLLM', 'run_id': 'a8766beb-10f4-41de-8750-3ea7cf0ca7e2', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': 'y'}}{'event': 'on_chain_stream', 'run_id': '05f24b4f-7ea3-4fb6-8417-3aa21633462f', 'tags': [], 'metadata': {}, 'name': 'RunnableSequence', 'data': {'chunk': 'y'}}\n\nContributing[​](#contributing \"Direct link to Contributing\")\n------------------------------------------------------------\n\nWe appreciate all chat model integration contributions.\n\nHere's a checklist to help make sure your contribution gets added to LangChain:\n\nDocumentation:\n\n* The 
model contains doc-strings for all initialization arguments, as these will be surfaced in the [APIReference](https://api.python.langchain.com/en/stable/langchain_api_reference.html).\n* The class doc-string for the model contains a link to the model API if the model is powered by a service.\n\nTests:\n\n* Add unit or integration tests to the overridden methods. Verify that `invoke`, `ainvoke`, `batch`, `stream` work if you've over-ridden the corresponding code.\n\nStreaming (if you're implementing it):\n\n* Make sure to invoke the `on_llm_new_token` callback\n* `on_llm_new_token` is invoked BEFORE yielding the chunk\n\nStop Token Behavior:\n\n* Stop token should be respected\n* Stop token should be INCLUDED as part of the response\n\nSecret API Keys:\n\n* If your model connects to an API it will likely accept API keys as part of its initialization. Use Pydantic's `SecretStr` type for secrets, so they don't get accidentally printed out when folks print the model.\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/custom_llm.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to create a custom chat model class\n\n](/v0.2/docs/how_to/custom_chat_model/)[\n\nNext\n\nCustom Retriever\n\n](/v0.2/docs/how_to/custom_retriever/)\n\n* [Implementation](#implementation)\n * [Let's test it 🧪](#lets-test-it-)\n* [Contributing](#contributing)"},"last_modified":{"kind":"null"}}},{"rowIdx":1336,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/MultiQueryRetriever/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to use the MultiQueryRetriever\n\nOn this page\n\nHow to use the MultiQueryRetriever\n==================================\n\nDistance-based vector database retrieval embeds (represents) queries in high-dimensional space and finds similar embedded documents based on a distance metric. But, retrieval may produce different results with subtle changes in query wording, or if the embeddings do not capture the semantics of the data well. Prompt engineering / tuning is sometimes done to manually address these problems, but can be tedious.\n\nThe [MultiQueryRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.multi_query.MultiQueryRetriever.html) automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query. For each query, it retrieves a set of relevant documents and takes the unique union across all queries to get a larger set of potentially relevant documents. 
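As a rough sketch of that unique-union step (an illustration only, not the retriever's actual implementation; the `unique_union` function and the per-query variable names are made up here), deduplicating the per-query result lists by page content could look like this:

    from typing import List

    from langchain_core.documents import Document

    def unique_union(doc_lists: List[List[Document]]) -> List[Document]:
        """Flatten per-query results, keeping the first copy of each page_content."""
        seen = set()
        unique_docs = []
        for docs in doc_lists:
            for doc in docs:
                if doc.page_content not in seen:
                    seen.add(doc.page_content)
                    unique_docs.append(doc)
        return unique_docs

    # e.g. unique_union([docs_for_query_1, docs_for_query_2, docs_for_query_3])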
By generating multiple perspectives on the same question, the `MultiQueryRetriever` can mitigate some of the limitations of the distance-based retrieval and get a richer set of results.\n\nLet's build a vectorstore using the [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng from the [RAG tutorial](/v0.2/docs/tutorials/rag/):\n\n # Build a sample vectorDBfrom langchain_chroma import Chromafrom langchain_community.document_loaders import WebBaseLoaderfrom langchain_openai import OpenAIEmbeddingsfrom langchain_text_splitters import RecursiveCharacterTextSplitter# Load blog postloader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")data = loader.load()# Splittext_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)splits = text_splitter.split_documents(data)# VectorDBembedding = OpenAIEmbeddings()vectordb = Chroma.from_documents(documents=splits, embedding=embedding)\n\n**API Reference:**[WebBaseLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.web_base.WebBaseLoader.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) | [RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html)\n\n#### Simple usage[​](#simple-usage \"Direct link to Simple usage\")\n\nSpecify the LLM to use for query generation, and the retriever will do the rest.\n\n from langchain.retrievers.multi_query import MultiQueryRetrieverfrom langchain_openai import ChatOpenAIquestion = \"What are the approaches to Task Decomposition?\"llm = ChatOpenAI(temperature=0)retriever_from_llm = MultiQueryRetriever.from_llm( retriever=vectordb.as_retriever(), llm=llm)\n\n**API Reference:**[MultiQueryRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.multi_query.MultiQueryRetriever.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)\n\n # Set logging for the queriesimport logginglogging.basicConfig()logging.getLogger(\"langchain.retrievers.multi_query\").setLevel(logging.INFO)\n\n unique_docs = retriever_from_llm.invoke(question)len(unique_docs)\n\n INFO:langchain.retrievers.multi_query:Generated queries: ['1. How can Task Decomposition be achieved through different methods?', '2. What strategies are commonly used for Task Decomposition?', '3. What are the various techniques for breaking down tasks in Task Decomposition?']\n\n 5\n\nNote that the underlying queries generated by the retriever are logged at the `INFO` level.\n\n#### Supplying your own prompt[​](#supplying-your-own-prompt \"Direct link to Supplying your own prompt\")\n\nUnder the hood, `MultiQueryRetriever` generates queries using a specific [prompt](https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/multi_query.html#MultiQueryRetriever). To customize this prompt:\n\n1. Make a [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) with an input variable for the question;\n2. 
Implement an [output parser](/v0.2/docs/concepts/#output-parsers) like the one below to split the result into a list of queries.\n\nThe prompt and output parser together must support the generation of a list of queries.\n\n from typing import Listfrom langchain_core.output_parsers import BaseOutputParserfrom langchain_core.prompts import PromptTemplatefrom langchain_core.pydantic_v1 import BaseModel, Field# Output parser will split the LLM result into a list of queriesclass LineListOutputParser(BaseOutputParser[List[str]]): \"\"\"Output parser for a list of lines.\"\"\" def parse(self, text: str) -> List[str]: lines = text.strip().split(\"\\n\") return linesoutput_parser = LineListOutputParser()QUERY_PROMPT = PromptTemplate( input_variables=[\"question\"], template=\"\"\"You are an AI language model assistant. Your task is to generate five different versions of the given user question to retrieve relevant documents from a vector database. By generating multiple perspectives on the user question, your goal is to help the user overcome some of the limitations of the distance-based similarity search. Provide these alternative questions separated by newlines. Original question: {question}\"\"\",)llm = ChatOpenAI(temperature=0)# Chainllm_chain = QUERY_PROMPT | llm | output_parser# Other inputsquestion = \"What are the approaches to Task Decomposition?\"\n\n**API Reference:**[BaseOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.base.BaseOutputParser.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html)\n\n # Runretriever = MultiQueryRetriever( retriever=vectordb.as_retriever(), llm_chain=llm_chain, parser_key=\"lines\") # \"lines\" is the key (attribute name) of the parsed output# Resultsunique_docs = retriever.invoke(\"What does the course say about regression?\")len(unique_docs)\n\n INFO:langchain.retrievers.multi_query:Generated queries: ['1. Can you provide insights on regression from the course material?', '2. How is regression discussed in the course content?', '3. What information does the course offer about regression?', '4. In what way is regression covered in the course?', '5. 
What are the teachings of the course regarding regression?']\n\n 9\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/MultiQueryRetriever.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to split by HTML sections\n\n](/v0.2/docs/how_to/HTML_section_aware_splitter/)[\n\nNext\n\nHow to add scores to retriever results\n\n](/v0.2/docs/how_to/add_scores_retriever/)"},"last_modified":{"kind":"null"}}},{"rowIdx":1337,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/custom_retriever/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* Custom Retriever\n\nOn this page\n\nHow to create a custom Retriever\n================================\n\nOverview[​](#overview \"Direct link to Overview\")\n------------------------------------------------\n\nMany LLM applications involve retrieving information from external data sources using a `Retriever`.\n\nA retriever is responsible for retrieving a list of relevant `Documents` to a given user `query`.\n\nThe retrieved documents are often formatted into prompts that are fed into an LLM, allowing the LLM to use the information in the to generate an appropriate response (e.g., answering a user question based on a knowledge base).\n\nInterface[​](#interface \"Direct link to Interface\")\n---------------------------------------------------\n\nTo create your own retriever, you need to extend the `BaseRetriever` class and implement the following methods:\n\nMethod\n\nDescription\n\nRequired/Optional\n\n`_get_relevant_documents`\n\nGet documents relevant to a query.\n\nRequired\n\n`_aget_relevant_documents`\n\nImplement to provide async native support.\n\nOptional\n\nThe logic inside of `_get_relevant_documents` can involve arbitrary calls to a database or to the web using requests.\n\ntip\n\nBy inherting from `BaseRetriever`, your retriever automatically becomes a LangChain [Runnable](/v0.2/docs/concepts/#interface) and will gain the standard `Runnable` functionality out of the box!\n\ninfo\n\nYou can use a `RunnableLambda` or `RunnableGenerator` to implement a retriever.\n\nThe main benefit of implementing a retriever as a `BaseRetriever` vs. a `RunnableLambda` (a custom [runnable function](/v0.2/docs/how_to/functions/)) is that a `BaseRetriever` is a well known LangChain entity so some tooling for monitoring may implement specialized behavior for retrievers. Another difference is that a `BaseRetriever` will behave slightly differently from `RunnableLambda` in some APIs; e.g., the `start` event in `astream_events` API will be `on_retriever_start` instead of `on_chain_start`.\n\nExample[​](#example \"Direct link to Example\")\n---------------------------------------------\n\nLet's implement a toy retriever that returns all documents whose text contains the text in the user query.\n\n from typing import Listfrom langchain_core.callbacks import CallbackManagerForRetrieverRunfrom langchain_core.documents import Documentfrom langchain_core.retrievers import BaseRetrieverclass ToyRetriever(BaseRetriever): \"\"\"A toy retriever that contains the top k documents that contain the user query. This retriever only implements the sync method _get_relevant_documents. 
If the retriever were to involve file access or network access, it could benefit from a native async implementation of `_aget_relevant_documents`. As usual, with Runnables, there's a default async implementation that's provided that delegates to the sync implementation running on another thread. \"\"\" documents: List[Document] \"\"\"List of documents to retrieve from.\"\"\" k: int \"\"\"Number of top results to return\"\"\" def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: \"\"\"Sync implementations for retriever.\"\"\" matching_documents = [] for document in documents: if len(matching_documents) > self.k: return matching_documents if query.lower() in document.page_content.lower(): matching_documents.append(document) return matching_documents # Optional: Provide a more efficient native implementation by overriding # _aget_relevant_documents # async def _aget_relevant_documents( # self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun # ) -> List[Document]: # \"\"\"Asynchronously get documents relevant to a query. # Args: # query: String to find relevant documents for # run_manager: The callbacks handler to use # Returns: # List of relevant documents # \"\"\"\n\n**API Reference:**[CallbackManagerForRetrieverRun](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.CallbackManagerForRetrieverRun.html) | [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) | [BaseRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain_core.retrievers.BaseRetriever.html)\n\nTest it 🧪[​](#test-it- \"Direct link to Test it 🧪\")\n----------------------------------------------------\n\n documents = [ Document( page_content=\"Dogs are great companions, known for their loyalty and friendliness.\", metadata={\"type\": \"dog\", \"trait\": \"loyalty\"}, ), Document( page_content=\"Cats are independent pets that often enjoy their own space.\", metadata={\"type\": \"cat\", \"trait\": \"independence\"}, ), Document( page_content=\"Goldfish are popular pets for beginners, requiring relatively simple care.\", metadata={\"type\": \"fish\", \"trait\": \"low maintenance\"}, ), Document( page_content=\"Parrots are intelligent birds capable of mimicking human speech.\", metadata={\"type\": \"bird\", \"trait\": \"intelligence\"}, ), Document( page_content=\"Rabbits are social animals that need plenty of space to hop around.\", metadata={\"type\": \"rabbit\", \"trait\": \"social\"}, ),]retriever = ToyRetriever(documents=documents, k=3)\n\n retriever.invoke(\"that\")\n\n [Document(page_content='Cats are independent pets that often enjoy their own space.', metadata={'type': 'cat', 'trait': 'independence'}), Document(page_content='Rabbits are social animals that need plenty of space to hop around.', metadata={'type': 'rabbit', 'trait': 'social'})]\n\nIt's a **runnable** so it'll benefit from the standard Runnable Interface! 
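One of those benefits is a default async implementation that simply runs the sync `_get_relevant_documents` in a worker thread. If the retriever did real file or network I/O, a native override could be faster; the following is a hypothetical sketch in the spirit of the commented-out stub above, not code from the original guide:

    from typing import List

    from langchain_core.callbacks import AsyncCallbackManagerForRetrieverRun
    from langchain_core.documents import Document

    # Add this method inside ToyRetriever to provide native async support.
    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> List[Document]:
        # A real retriever would await an async client here instead of
        # filtering an in-memory list.
        return [
            doc
            for doc in self.documents
            if query.lower() in doc.page_content.lower()
        ][: self.k]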
🤩\n\n await retriever.ainvoke(\"that\")\n\n [Document(page_content='Cats are independent pets that often enjoy their own space.', metadata={'type': 'cat', 'trait': 'independence'}), Document(page_content='Rabbits are social animals that need plenty of space to hop around.', metadata={'type': 'rabbit', 'trait': 'social'})]\n\n retriever.batch([\"dog\", \"cat\"])\n\n [[Document(page_content='Dogs are great companions, known for their loyalty and friendliness.', metadata={'type': 'dog', 'trait': 'loyalty'})], [Document(page_content='Cats are independent pets that often enjoy their own space.', metadata={'type': 'cat', 'trait': 'independence'})]]\n\n async for event in retriever.astream_events(\"bar\", version=\"v1\"): print(event)\n\n {'event': 'on_retriever_start', 'run_id': 'f96f268d-8383-4921-b175-ca583924d9ff', 'name': 'ToyRetriever', 'tags': [], 'metadata': {}, 'data': {'input': 'bar'}}{'event': 'on_retriever_stream', 'run_id': 'f96f268d-8383-4921-b175-ca583924d9ff', 'tags': [], 'metadata': {}, 'name': 'ToyRetriever', 'data': {'chunk': []}}{'event': 'on_retriever_end', 'name': 'ToyRetriever', 'run_id': 'f96f268d-8383-4921-b175-ca583924d9ff', 'tags': [], 'metadata': {}, 'data': {'output': []}}\n\nContributing[​](#contributing \"Direct link to Contributing\")\n------------------------------------------------------------\n\nWe appreciate contributions of interesting retrievers!\n\nHere's a checklist to help make sure your contribution gets added to LangChain:\n\nDocumentation:\n\n* The retriever contains doc-strings for all initialization arguments, as these will be surfaced in the [API Reference](https://api.python.langchain.com/en/stable/langchain_api_reference.html).\n* The class doc-string for the model contains a link to any relevant APIs used for the retriever (e.g., if the retriever is retrieving from wikipedia, it'll be good to link to the wikipedia API!)\n\nTests:\n\n* Add unit or integration tests to verify that `invoke` and `ainvoke` work.\n\nOptimizations:\n\nIf the retriever is connecting to external data sources (e.g., an API or a file), it'll almost certainly benefit from an async native optimization!\n\n* Provide a native async implementation of `_aget_relevant_documents` (used by `ainvoke`)\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/custom_retriever.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to create a custom LLM class\n\n](/v0.2/docs/how_to/custom_llm/)[\n\nNext\n\nHow to create custom tools\n\n](/v0.2/docs/how_to/custom_tools/)\n\n* [Overview](#overview)\n* [Interface](#interface)\n* [Example](#example)\n* [Test it 🧪](#test-it-)\n* [Contributing](#contributing)"},"last_modified":{"kind":"null"}}},{"rowIdx":1338,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/add_scores_retriever/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to add scores to retriever results\n\nOn this page\n\nHow to add scores to retriever results\n======================================\n\nRetrievers will return sequences of [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) objects, which by default include no 
information about the process that retrieved them (e.g., a similarity score against a query). Here we demonstrate how to add retrieval scores to the `.metadata` of documents:\n\n1. From [vectorstore retrievers](/v0.2/docs/how_to/vectorstore_retriever/);\n2. From higher-order LangChain retrievers, such as [SelfQueryRetriever](/v0.2/docs/how_to/self_query/) or [MultiVectorRetriever](/v0.2/docs/how_to/multi_vector/).\n\nFor (1), we will implement a short wrapper function around the corresponding vector store. For (2), we will update a method of the corresponding class.\n\nCreate vector store[​](#create-vector-store \"Direct link to Create vector store\")\n---------------------------------------------------------------------------------\n\nFirst we populate a vector store with some data. We will use a [PineconeVectorStore](https://api.python.langchain.com/en/latest/vectorstores/langchain_pinecone.vectorstores.PineconeVectorStore.html), but this guide is compatible with any LangChain vector store that implements a `.similarity_search_with_score` method.\n\n from langchain_core.documents import Documentfrom langchain_openai import OpenAIEmbeddingsfrom langchain_pinecone import PineconeVectorStoredocs = [ Document( page_content=\"A bunch of scientists bring back dinosaurs and mayhem breaks loose\", metadata={\"year\": 1993, \"rating\": 7.7, \"genre\": \"science fiction\"}, ), Document( page_content=\"Leo DiCaprio gets lost in a dream within a dream within a dream within a ...\", metadata={\"year\": 2010, \"director\": \"Christopher Nolan\", \"rating\": 8.2}, ), Document( page_content=\"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea\", metadata={\"year\": 2006, \"director\": \"Satoshi Kon\", \"rating\": 8.6}, ), Document( page_content=\"A bunch of normal-sized women are supremely wholesome and some men pine after them\", metadata={\"year\": 2019, \"director\": \"Greta Gerwig\", \"rating\": 8.3}, ), Document( page_content=\"Toys come alive and have a blast doing so\", metadata={\"year\": 1995, \"genre\": \"animated\"}, ), Document( page_content=\"Three men walk into the Zone, three men walk out of the Zone\", metadata={ \"year\": 1979, \"director\": \"Andrei Tarkovsky\", \"genre\": \"thriller\", \"rating\": 9.9, }, ),]vectorstore = PineconeVectorStore.from_documents( docs, index_name=\"sample\", embedding=OpenAIEmbeddings())\n\n**API Reference:**[Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) | [PineconeVectorStore](https://api.python.langchain.com/en/latest/vectorstores/langchain_pinecone.vectorstores.PineconeVectorStore.html)\n\nRetriever[​](#retriever \"Direct link to Retriever\")\n---------------------------------------------------\n\nTo obtain scores from a vector store retriever, we wrap the underlying vector store's `.similarity_search_with_score` method in a short function that packages scores into the associated document's metadata.\n\nWe add a `@chain` decorator to the function to create a [Runnable](/v0.2/docs/concepts/#langchain-expression-language) that can be used similarly to a typical retriever.\n\n from typing import Listfrom langchain_core.documents import Documentfrom langchain_core.runnables import chain@chaindef retriever(query: str) -> List[Document]: docs, scores = zip(*vectorstore.similarity_search_with_score(query)) 
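    # similarity_search_with_score returns (Document, score) pairs;
    # zip(*pairs) transposes them into one tuple of Documents and a
    # parallel tuple of scores, which are re-paired below.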
for doc, score in zip(docs, scores): doc.metadata[\"score\"] = score return docs\n\n**API Reference:**[Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) | [chain](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.chain.html)\n\n result = retriever.invoke(\"dinosaur\")result\n\n (Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'genre': 'science fiction', 'rating': 7.7, 'year': 1993.0, 'score': 0.84429127}), Document(page_content='Toys come alive and have a blast doing so', metadata={'genre': 'animated', 'year': 1995.0, 'score': 0.792038262}), Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'director': 'Andrei Tarkovsky', 'genre': 'thriller', 'rating': 9.9, 'year': 1979.0, 'score': 0.751571238}), Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'director': 'Satoshi Kon', 'rating': 8.6, 'year': 2006.0, 'score': 0.747471571}))\n\nNote that similarity scores from the retrieval step are included in the metadata of the above documents.\n\nSelfQueryRetriever[​](#selfqueryretriever \"Direct link to SelfQueryRetriever\")\n------------------------------------------------------------------------------\n\n`SelfQueryRetriever` will use a LLM to generate a query that is potentially structured-- for example, it can construct filters for the retrieval on top of the usual semantic-similarity driven selection. See [this guide](/v0.2/docs/how_to/self_query/) for more detail.\n\n`SelfQueryRetriever` includes a short (1 - 2 line) method `_get_docs_with_query` that executes the `vectorstore` search. We can subclass `SelfQueryRetriever` and override this method to propagate similarity scores.\n\nFirst, following the [how-to guide](/v0.2/docs/how_to/self_query/), we will need to establish some metadata on which to filter:\n\n from langchain.chains.query_constructor.base import AttributeInfofrom langchain.retrievers.self_query.base import SelfQueryRetrieverfrom langchain_openai import ChatOpenAImetadata_field_info = [ AttributeInfo( name=\"genre\", description=\"The genre of the movie. 
One of ['science fiction', 'comedy', 'drama', 'thriller', 'romance', 'action', 'animated']\", type=\"string\", ), AttributeInfo( name=\"year\", description=\"The year the movie was released\", type=\"integer\", ), AttributeInfo( name=\"director\", description=\"The name of the movie director\", type=\"string\", ), AttributeInfo( name=\"rating\", description=\"A 1-10 rating for the movie\", type=\"float\" ),]document_content_description = \"Brief summary of a movie\"llm = ChatOpenAI(temperature=0)\n\n**API Reference:**[AttributeInfo](https://api.python.langchain.com/en/latest/chains/langchain.chains.query_constructor.schema.AttributeInfo.html) | [SelfQueryRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.self_query.base.SelfQueryRetriever.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)\n\nWe then override the `_get_docs_with_query` to use the `similarity_search_with_score` method of the underlying vector store:\n\n from typing import Any, Dictclass CustomSelfQueryRetriever(SelfQueryRetriever): def _get_docs_with_query( self, query: str, search_kwargs: Dict[str, Any] ) -> List[Document]: \"\"\"Get docs, adding score information.\"\"\" docs, scores = zip( *vectorstore.similarity_search_with_score(query, **search_kwargs) ) for doc, score in zip(docs, scores): doc.metadata[\"score\"] = score return docs\n\nInvoking this retriever will now include similarity scores in the document metadata. Note that the underlying structured-query capabilities of `SelfQueryRetriever` are retained.\n\n retriever = CustomSelfQueryRetriever.from_llm( llm, vectorstore, document_content_description, metadata_field_info,)result = retriever.invoke(\"dinosaur movie with rating less than 8\")result\n\n (Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'genre': 'science fiction', 'rating': 7.7, 'year': 1993.0, 'score': 0.84429127}),)\n\nMultiVectorRetriever[​](#multivectorretriever \"Direct link to MultiVectorRetriever\")\n------------------------------------------------------------------------------------\n\n`MultiVectorRetriever` allows you to associate multiple vectors with a single document. This can be useful in a number of applications. For example, we can index small chunks of a larger document and run the retrieval on the chunks, but return the larger \"parent\" document when invoking the retriever. [ParentDocumentRetriever](/v0.2/docs/how_to/parent_document_retriever/), a subclass of `MultiVectorRetriever`, includes convenience methods for populating a vector store to support this. Further applications are detailed in this [how-to guide](/v0.2/docs/how_to/multi_vector/).\n\nTo propagate similarity scores through this retriever, we can again subclass `MultiVectorRetriever` and override a method. This time we will override `_get_relevant_documents`.\n\nFirst, we prepare some fake data. 
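As a brief, illustrative aside (not part of the original guide), the docstore we are about to use follows LangChain's key-value `BaseStore` interface: `mset` writes key-value pairs and `mget` reads them back, returning `None` for missing keys, which is why the custom retriever below guards its `mget` result:

    from langchain.storage import InMemoryStore

    store = InMemoryStore()
    store.mset([("some-key", "any Python object")])
    store.mget(["some-key", "missing-key"])  # -> ['any Python object', None]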
We generate fake \"whole documents\" and store them in a document store; here we will use a simple [InMemoryStore](https://api.python.langchain.com/en/latest/stores/langchain_core.stores.InMemoryBaseStore.html).\n\n from langchain.storage import InMemoryStorefrom langchain_text_splitters import RecursiveCharacterTextSplitter# The storage layer for the parent documentsdocstore = InMemoryStore()fake_whole_documents = [ (\"fake_id_1\", Document(page_content=\"fake whole document 1\")), (\"fake_id_2\", Document(page_content=\"fake whole document 2\")),]docstore.mset(fake_whole_documents)\n\n**API Reference:**[InMemoryStore](https://api.python.langchain.com/en/latest/stores/langchain_core.stores.InMemoryStore.html) | [RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html)\n\nNext we will add some fake \"sub-documents\" to our vector store. We can link these sub-documents to the parent documents by populating the `\"doc_id\"` key in its metadata.\n\n docs = [ Document( page_content=\"A snippet from a larger document discussing cats.\", metadata={\"doc_id\": \"fake_id_1\"}, ), Document( page_content=\"A snippet from a larger document discussing discourse.\", metadata={\"doc_id\": \"fake_id_1\"}, ), Document( page_content=\"A snippet from a larger document discussing chocolate.\", metadata={\"doc_id\": \"fake_id_2\"}, ),]vectorstore.add_documents(docs)\n\n ['62a85353-41ff-4346-bff7-be6c8ec2ed89', '5d4a0e83-4cc5-40f1-bc73-ed9cbad0ee15', '8c1d9a56-120f-45e4-ba70-a19cd19a38f4']\n\nTo propagate the scores, we subclass `MultiVectorRetriever` and override its `_get_relevant_documents` method. Here we will make two changes:\n\n1. We will add similarity scores to the metadata of the corresponding \"sub-documents\" using the `similarity_search_with_score` method of the underlying vector store as above;\n2. We will include a list of these sub-documents in the metadata of the retrieved parent document. This surfaces what snippets of text were identified by the retrieval, together with their corresponding similarity scores.\n\n from collections import defaultdictfrom langchain.retrievers import MultiVectorRetrieverfrom langchain_core.callbacks import CallbackManagerForRetrieverRunclass CustomMultiVectorRetriever(MultiVectorRetriever): def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: \"\"\"Get documents relevant to a query. 
Args: query: String to find relevant documents for run_manager: The callbacks handler to use Returns: List of relevant documents \"\"\" results = self.vectorstore.similarity_search_with_score( query, **self.search_kwargs ) # Map doc_ids to list of sub-documents, adding scores to metadata id_to_doc = defaultdict(list) for doc, score in results: doc_id = doc.metadata.get(\"doc_id\") if doc_id: doc.metadata[\"score\"] = score id_to_doc[doc_id].append(doc) # Fetch documents corresponding to doc_ids, retaining sub_docs in metadata docs = [] for _id, sub_docs in id_to_doc.items(): docstore_docs = self.docstore.mget([_id]) if docstore_docs: if doc := docstore_docs[0]: doc.metadata[\"sub_docs\"] = sub_docs docs.append(doc) return docs\n\n**API Reference:**[MultiVectorRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.multi_vector.MultiVectorRetriever.html) | [CallbackManagerForRetrieverRun](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.CallbackManagerForRetrieverRun.html)\n\nInvoking this retriever, we can see that it identifies the correct parent document, including the relevant snippet from the sub-document with similarity score.\n\n retriever = CustomMultiVectorRetriever(vectorstore=vectorstore, docstore=docstore)retriever.invoke(\"cat\")\n\n [Document(page_content='fake whole document 1', metadata={'sub_docs': [Document(page_content='A snippet from a larger document discussing cats.', metadata={'doc_id': 'fake_id_1', 'score': 0.831276655})]})]\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/add_scores_retriever.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to use the MultiQueryRetriever\n\n](/v0.2/docs/how_to/MultiQueryRetriever/)[\n\nNext\n\nCaching\n\n](/v0.2/docs/how_to/caching_embeddings/)\n\n* [Create vector store](#create-vector-store)\n* [Retriever](#retriever)\n* [SelfQueryRetriever](#selfqueryretriever)\n* [MultiVectorRetriever](#multivectorretriever)"},"last_modified":{"kind":"null"}}},{"rowIdx":1339,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/caching_embeddings/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* Caching\n\nOn this page\n\nCaching\n=======\n\nEmbeddings can be stored or temporarily cached to avoid needing to recompute them.\n\nCaching embeddings can be done using a `CacheBackedEmbeddings`. The cache backed embedder is a wrapper around an embedder that caches embeddings in a key-value store. The text is hashed and the hash is used as the key in the cache.\n\nThe main supported way to initialize a `CacheBackedEmbeddings` is `from_bytes_store`. It takes the following parameters:\n\n* underlying\\_embedder: The embedder to use for embedding.\n* document\\_embedding\\_cache: Any [`ByteStore`](/v0.2/docs/integrations/stores/) for caching document embeddings.\n* batch\\_size: (optional, defaults to `None`) The number of documents to embed between store updates.\n* namespace: (optional, defaults to `\"\"`) The namespace to use for document cache. This namespace is used to avoid collisions with other caches. 
For example, set it to the name of the embedding model used.\n* query\\_embedding\\_cache: (optional, defaults to `None` or not caching) A [`ByteStore`](/v0.2/docs/integrations/stores/) for caching query embeddings, or `True` to use the same store as `document_embedding_cache`.\n\n**Attention**:\n\n* Be sure to set the `namespace` parameter to avoid collisions of the same text embedded using different embeddings models.\n* `CacheBackedEmbeddings` does not cache query embeddings by default. To enable query caching, one need to specify a `query_embedding_cache`.\n\n from langchain.embeddings import CacheBackedEmbeddings\n\n**API Reference:**[CacheBackedEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain.embeddings.cache.CacheBackedEmbeddings.html)\n\nUsing with a Vector Store[​](#using-with-a-vector-store \"Direct link to Using with a Vector Store\")\n---------------------------------------------------------------------------------------------------\n\nFirst, let's see an example that uses the local file system for storing embeddings and uses FAISS vector store for retrieval.\n\n %pip install --upgrade --quiet langchain-openai faiss-cpu\n\n from langchain.storage import LocalFileStorefrom langchain_community.document_loaders import TextLoaderfrom langchain_community.vectorstores import FAISSfrom langchain_openai import OpenAIEmbeddingsfrom langchain_text_splitters import CharacterTextSplitterunderlying_embeddings = OpenAIEmbeddings()store = LocalFileStore(\"./cache/\")cached_embedder = CacheBackedEmbeddings.from_bytes_store( underlying_embeddings, store, namespace=underlying_embeddings.model)\n\n**API Reference:**[LocalFileStore](https://api.python.langchain.com/en/latest/storage/langchain.storage.file_system.LocalFileStore.html) | [TextLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.text.TextLoader.html) | [FAISS](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) | [CharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.CharacterTextSplitter.html)\n\nThe cache is empty prior to embedding:\n\n list(store.yield_keys())\n\n []\n\nLoad the document, split it into chunks, embed each chunk and load it into the vector store.\n\n raw_documents = TextLoader(\"state_of_the_union.txt\").load()text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)documents = text_splitter.split_documents(raw_documents)\n\nCreate the vector store:\n\n %%timedb = FAISS.from_documents(documents, cached_embedder)\n\n CPU times: user 218 ms, sys: 29.7 ms, total: 248 msWall time: 1.02 s\n\nIf we try to create the vector store again, it'll be much faster since it does not need to re-compute any embeddings.\n\n %%timedb2 = FAISS.from_documents(documents, cached_embedder)\n\n CPU times: user 15.7 ms, sys: 2.22 ms, total: 18 msWall time: 17.2 ms\n\nAnd here are some of the embeddings that got created:\n\n list(store.yield_keys())[:5]\n\n ['text-embedding-ada-00217a6727d-8916-54eb-b196-ec9c9d6ca472', 'text-embedding-ada-0025fc0d904-bd80-52da-95c9-441015bfb438', 'text-embedding-ada-002e4ad20ef-dfaa-5916-9459-f90c6d8e8159', 'text-embedding-ada-002ed199159-c1cd-5597-9757-f80498e8f17b', 'text-embedding-ada-0021297d37a-2bc1-5e19-bf13-6c950f075062']\n\nSwapping the 
`ByteStore`\n========================\n\nIn order to use a different `ByteStore`, just use it when creating your `CacheBackedEmbeddings`. Below, we create an equivalent cached embeddings object, except using the non-persistent `InMemoryByteStore` instead:\n\n from langchain.embeddings import CacheBackedEmbeddingsfrom langchain.storage import InMemoryByteStorestore = InMemoryByteStore()cached_embedder = CacheBackedEmbeddings.from_bytes_store( underlying_embeddings, store, namespace=underlying_embeddings.model)\n\n**API Reference:**[CacheBackedEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain.embeddings.cache.CacheBackedEmbeddings.html) | [InMemoryByteStore](https://api.python.langchain.com/en/latest/stores/langchain_core.stores.InMemoryByteStore.html)\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/caching_embeddings.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to add scores to retriever results\n\n](/v0.2/docs/how_to/add_scores_retriever/)[\n\nNext\n\nHow to use callbacks in async environments\n\n](/v0.2/docs/how_to/callbacks_async/)\n\n* [Using with a Vector Store](#using-with-a-vector-store)"},"last_modified":{"kind":"null"}}},{"rowIdx":1340,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/debugging/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to debug your LLM apps\n\nOn this page\n\nHow to debug your LLM apps\n==========================\n\nLike building any type of software, at some point you'll need to debug when building with LLMs. A model call will fail, or model output will be misformatted, or there will be some nested model calls and it won't be clear where along the way an incorrect output was created.\n\nThere are three main methods for debugging:\n\n* Verbose Mode: This adds print statements for \"important\" events in your chain.\n* Debug Mode: This add logging statements for ALL events in your chain.\n* LangSmith Tracing: This logs events to [LangSmith](https://docs.smith.langchain.com/) to allow for visualization there.\n\nVerbose Mode\n\nDebug Mode\n\nLangSmith Tracing\n\nFree\n\n✅\n\n✅\n\n✅\n\nUI\n\n❌\n\n❌\n\n✅\n\nPersisted\n\n❌\n\n❌\n\n✅\n\nSee all events\n\n❌\n\n✅\n\n✅\n\nSee \"important\" events\n\n✅\n\n❌\n\n✅\n\nRuns Locally\n\n✅\n\n✅\n\n❌\n\nTracing[​](#tracing \"Direct link to Tracing\")\n---------------------------------------------\n\nMany of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. 
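Besides the three approaches compared above, a narrower option (a hedged sketch, not part of the original guide) is to attach LangChain's console tracer to a single call via the `callbacks` config key; this prints the same kind of run tree that debug mode produces, but only for that one invocation:

    from langchain_core.runnables import RunnableLambda
    from langchain_core.tracers.stdout import ConsoleCallbackHandler

    # A trivial runnable for demonstration; any chain or agent executor
    # can be traced the same way for a single call.
    chain = RunnableLambda(lambda x: x["input"].upper())
    chain.invoke(
        {"input": "hello"},
        config={"callbacks": [ConsoleCallbackHandler()]},
    )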
The best way to do this is with [LangSmith](https://smith.langchain.com).\n\nAfter you sign up at the link above, make sure to set your environment variables to start logging traces:\n\n export LANGCHAIN_TRACING_V2=\"true\"export LANGCHAIN_API_KEY=\"...\"\n\nOr, if in a notebook, you can set them with:\n\n import getpassimport osos.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()\n\nLet's suppose we have an agent, and want to visualize the actions it takes and tool outputs it receives. Without any debugging, here's what we see:\n\n* OpenAI\n* Anthropic\n* Azure\n* Google\n* Cohere\n* FireworksAI\n* Groq\n* MistralAI\n* TogetherAI\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"OPENAI_API_KEY\"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n\n pip install -qU langchain-anthropic\n\n import getpassimport osos.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()from langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass()from langchain_openai import AzureChatOpenAIllm = AzureChatOpenAI( azure_endpoint=os.environ[\"AZURE_OPENAI_ENDPOINT\"], azure_deployment=os.environ[\"AZURE_OPENAI_DEPLOYMENT_NAME\"], openai_api_version=os.environ[\"AZURE_OPENAI_API_VERSION\"],)\n\n pip install -qU langchain-google-vertexai\n\n import getpassimport osos.environ[\"GOOGLE_API_KEY\"] = getpass.getpass()from langchain_google_vertexai import ChatVertexAIllm = ChatVertexAI(model=\"gemini-pro\")\n\n pip install -qU langchain-cohere\n\n import getpassimport osos.environ[\"COHERE_API_KEY\"] = getpass.getpass()from langchain_cohere import ChatCoherellm = ChatCohere(model=\"command-r\")\n\n pip install -qU langchain-fireworks\n\n import getpassimport osos.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass()from langchain_fireworks import ChatFireworksllm = ChatFireworks(model=\"accounts/fireworks/models/mixtral-8x7b-instruct\")\n\n pip install -qU langchain-groq\n\n import getpassimport osos.environ[\"GROQ_API_KEY\"] = getpass.getpass()from langchain_groq import ChatGroqllm = ChatGroq(model=\"llama3-8b-8192\")\n\n pip install -qU langchain-mistralai\n\n import getpassimport osos.environ[\"MISTRAL_API_KEY\"] = getpass.getpass()from langchain_mistralai import ChatMistralAIllm = ChatMistralAI(model=\"mistral-large-latest\")\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"TOGETHER_API_KEY\"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI( base_url=\"https://api.together.xyz/v1\", api_key=os.environ[\"TOGETHER_API_KEY\"], model=\"mistralai/Mixtral-8x7B-Instruct-v0.1\",)\n\n from langchain.agents import AgentExecutor, create_tool_calling_agentfrom langchain_community.tools.tavily_search import TavilySearchResultsfrom langchain_core.prompts import ChatPromptTemplatetools = [TavilySearchResults(max_results=1)]prompt = ChatPromptTemplate.from_messages( [ ( \"system\", \"You are a helpful assistant.\", ), (\"placeholder\", \"{chat_history}\"), (\"human\", \"{input}\"), (\"placeholder\", \"{agent_scratchpad}\"), ])# Construct the Tools agentagent = create_tool_calling_agent(llm, tools, prompt)# Create an agent executor by passing in the agent and toolsagent_executor = AgentExecutor(agent=agent, tools=tools)agent_executor.invoke( {\"input\": \"Who directed the 2023 film Oppenheimer and 
what is their age in days?\"})\n\n**API Reference:**[AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html) | [create\\_tool\\_calling\\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.tool_calling_agent.base.create_tool_calling_agent.html) | [TavilySearchResults](https://api.python.langchain.com/en/latest/tools/langchain_community.tools.tavily_search.tool.TavilySearchResults.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html)\n\n {'input': 'Who directed the 2023 film Oppenheimer and what is their age in days?', 'output': 'The 2023 film \"Oppenheimer\" was directed by Christopher Nolan.\\n\\nTo calculate Christopher Nolan\\'s age in days, we first need his birthdate, which is July 30, 1970. Let\\'s calculate his age in days from his birthdate to today\\'s date, December 7, 2023.\\n\\n1. Calculate the total number of days from July 30, 1970, to December 7, 2023.\\n2. Nolan was born on July 30, 1970. From July 30, 1970, to July 30, 2023, is 53 years.\\n3. From July 30, 2023, to December 7, 2023, is 130 days.\\n\\nNow, calculate the total days:\\n- 53 years = 53 x 365 = 19,345 days\\n- Adding leap years from 1970 to 2023: There are 13 leap years (1972, 1976, 1980, 1984, 1988, 1992, 1996, 2000, 2004, 2008, 2012, 2016, 2020). So, add 13 days.\\n- Total days from years and leap years = 19,345 + 13 = 19,358 days\\n- Add the days from July 30, 2023, to December 7, 2023 = 130 days\\n\\nTotal age in days = 19,358 + 130 = 19,488 days\\n\\nChristopher Nolan is 19,488 days old as of December 7, 2023.'}\n\nWe don't get much output, but since we set up LangSmith we can easily see what happened under the hood:\n\n[https://smith.langchain.com/public/a89ff88f-9ddc-4757-a395-3a1b365655bf/r](https://smith.langchain.com/public/a89ff88f-9ddc-4757-a395-3a1b365655bf/r)\n\n`set_debug` and `set_verbose`[​](#set_debug-and-set_verbose \"Direct link to set_debug-and-set_verbose\")\n-------------------------------------------------------------------------------------------------------\n\nIf you're prototyping in Jupyter Notebooks or running Python scripts, it can be helpful to print out the intermediate steps of a chain run.\n\nThere are a number of ways to enable printing at varying degrees of verbosity.\n\nNote: These still work even with LangSmith enabled, so you can have both turned on and running at the same time\n\n### `set_verbose(True)`[​](#set_verbosetrue \"Direct link to set_verbosetrue\")\n\nSetting the `verbose` flag will print out inputs and outputs in a slightly more readable format and will skip logging certain raw outputs (like the token usage stats for an LLM call) so that you can focus on application logic.\n\n from langchain.globals import set_verboseset_verbose(True)agent_executor = AgentExecutor(agent=agent, tools=tools)agent_executor.invoke( {\"input\": \"Who directed the 2023 film Oppenheimer and what is their age in days?\"})\n\n**API Reference:**[set\\_verbose](https://api.python.langchain.com/en/latest/globals/langchain.globals.set_verbose.html)\n\n \u001b[1m> Entering new AgentExecutor chain...\u001b[0m\u001b[32;1m\u001b[1;3mInvoking: `tavily_search_results_json` with `{'query': 'director of the 2023 film Oppenheimer'}`\u001b[0m\u001b[36;1m\u001b[1;3m[{'url': 'https://m.imdb.com/title/tt15398776/', 'content': 'Oppenheimer: Directed by Christopher Nolan. With Cillian Murphy, Emily Blunt, Robert Downey Jr., Alden Ehrenreich. 
The story of American scientist J. Robert Oppenheimer and his role in the development of the atomic bomb.'}]\u001b[0m\u001b[32;1m\u001b[1;3mInvoking: `tavily_search_results_json` with `{'query': 'birth date of Christopher Nolan'}`\u001b[0m\u001b[36;1m\u001b[1;3m[{'url': 'https://m.imdb.com/name/nm0634240/bio/', 'content': 'Christopher Nolan. Writer: Tenet. Best known for his cerebral, often nonlinear, storytelling, acclaimed Academy Award winner writer/director/producer Sir Christopher Nolan CBE was born in London, England. Over the course of more than 25 years of filmmaking, Nolan has gone from low-budget independent films to working on some of the biggest blockbusters ever made and became one of the most ...'}]\u001b[0m\u001b[32;1m\u001b[1;3mInvoking: `tavily_search_results_json` with `{'query': 'Christopher Nolan birth date'}`responded: The 2023 film **Oppenheimer** was directed by **Christopher Nolan**.To calculate Christopher Nolan's age in days, I need his exact birth date. Let me find that information for you.\u001b[0m\u001b[36;1m\u001b[1;3m[{'url': 'https://m.imdb.com/name/nm0634240/bio/', 'content': 'Christopher Nolan. Writer: Tenet. Best known for his cerebral, often nonlinear, storytelling, acclaimed Academy Award winner writer/director/producer Sir Christopher Nolan CBE was born in London, England. Over the course of more than 25 years of filmmaking, Nolan has gone from low-budget independent films to working on some of the biggest blockbusters ever made and became one of the most ...'}]\u001b[0m\u001b[32;1m\u001b[1;3mInvoking: `tavily_search_results_json` with `{'query': 'Christopher Nolan date of birth'}`responded: It appears that I need to refine my search to get the exact birth date of Christopher Nolan. Let me try again to find that specific information.\u001b[0m\u001b[36;1m\u001b[1;3m[{'url': 'https://m.imdb.com/name/nm0634240/bio/', 'content': 'Christopher Nolan. Writer: Tenet. Best known for his cerebral, often nonlinear, storytelling, acclaimed Academy Award winner writer/director/producer Sir Christopher Nolan CBE was born in London, England. Over the course of more than 25 years of filmmaking, Nolan has gone from low-budget independent films to working on some of the biggest blockbusters ever made and became one of the most ...'}]\u001b[0m\u001b[32;1m\u001b[1;3mI am currently unable to retrieve the exact birth date of Christopher Nolan from the sources available. However, it is widely known that he was born on July 30, 1970. Using this date, I can calculate his age in days as of today.Let's calculate:- Christopher Nolan's birth date: July 30, 1970.- Today's date: December 7, 2023.The number of days between these two dates can be calculated as follows:1. From July 30, 1970, to July 30, 2023, is 53 years.2. From July 30, 2023, to December 7, 2023, is 130 days.Calculating the total days for 53 years (considering leap years):- 53 years × 365 days/year = 19,345 days- Adding leap years (1972, 1976, ..., 2020, 2024 - 13 leap years): 13 daysTotal days from birth until July 30, 2023: 19,345 + 13 = 19,358 daysAdding the days from July 30, 2023, to December 7, 2023: 130 daysTotal age in days as of December 7, 2023: 19,358 + 130 = 19,488 days.Therefore, Christopher Nolan is 19,488 days old as of December 7, 2023.\u001b[0m\u001b[1m> Finished chain.\u001b[0m\n\n {'input': 'Who directed the 2023 film Oppenheimer and what is their age in days?', 'output': \"I am currently unable to retrieve the exact birth date of Christopher Nolan from the sources available. 
However, it is widely known that he was born on July 30, 1970. Using this date, I can calculate his age in days as of today.\\n\\nLet's calculate:\\n\\n- Christopher Nolan's birth date: July 30, 1970.\\n- Today's date: December 7, 2023.\\n\\nThe number of days between these two dates can be calculated as follows:\\n\\n1. From July 30, 1970, to July 30, 2023, is 53 years.\\n2. From July 30, 2023, to December 7, 2023, is 130 days.\\n\\nCalculating the total days for 53 years (considering leap years):\\n- 53 years × 365 days/year = 19,345 days\\n- Adding leap years (1972, 1976, ..., 2020, 2024 - 13 leap years): 13 days\\n\\nTotal days from birth until July 30, 2023: 19,345 + 13 = 19,358 days\\nAdding the days from July 30, 2023, to December 7, 2023: 130 days\\n\\nTotal age in days as of December 7, 2023: 19,358 + 130 = 19,488 days.\\n\\nTherefore, Christopher Nolan is 19,488 days old as of December 7, 2023.\"}\n\n### `set_debug(True)`[​](#set_debugtrue \"Direct link to set_debugtrue\")\n\nSetting the global `debug` flag will cause all LangChain components with callback support (chains, models, agents, tools, retrievers) to print the inputs they receive and outputs they generate. This is the most verbose setting and will fully log raw inputs and outputs.\n\n from langchain.globals import set_debugset_debug(True)set_verbose(False)agent_executor = AgentExecutor(agent=agent, tools=tools)agent_executor.invoke( {\"input\": \"Who directed the 2023 film Oppenheimer and what is their age in days?\"})\n\n**API Reference:**[set\\_debug](https://api.python.langchain.com/en/latest/globals/langchain.globals.set_debug.html)\n\n \u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor] Entering Chain run with input:\u001b[0m{ \"input\": \"Who directed the 2023 film Oppenheimer and what is their age in days?\"}\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 2:chain:RunnableSequence] Entering Chain run with input:\u001b[0m{ \"input\": \"\"}\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 2:chain:RunnableSequence > 3:chain:RunnableAssign] Entering Chain run with input:\u001b[0m{ \"input\": \"\"}\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 2:chain:RunnableSequence > 3:chain:RunnableAssign > 4:chain:RunnableParallel] Entering Chain run with input:\u001b[0m{ \"input\": \"\"}\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 2:chain:RunnableSequence > 3:chain:RunnableAssign > 4:chain:RunnableParallel > 5:chain:RunnableLambda] Entering Chain run with input:\u001b[0m{ \"input\": \"\"}\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 2:chain:RunnableSequence > 3:chain:RunnableAssign > 4:chain:RunnableParallel > 5:chain:RunnableLambda] [1ms] Exiting Chain run with output:\u001b[0m{ \"output\": []}\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 2:chain:RunnableSequence > 3:chain:RunnableAssign > 4:chain:RunnableParallel] [2ms] Exiting Chain run with output:\u001b[0m{ \"agent_scratchpad\": []}\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 2:chain:RunnableSequence > 3:chain:RunnableAssign] [5ms] Exiting Chain run with output:\u001b[0m{ \"input\": \"Who directed the 2023 film Oppenheimer and what is their age in days?\", \"intermediate_steps\": [], \"agent_scratchpad\": []}\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 2:chain:RunnableSequence > 
6:prompt:ChatPromptTemplate] Entering Prompt run with input:\u001b[0m{ \"input\": \"Who directed the 2023 film Oppenheimer and what is their age in days?\", \"intermediate_steps\": [], \"agent_scratchpad\": []}\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 2:chain:RunnableSequence > 6:prompt:ChatPromptTemplate] [1ms] Exiting Prompt run with output:\u001b[0m[outputs]\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 2:chain:RunnableSequence > 7:llm:ChatOpenAI] Entering LLM run with input:\u001b[0m{ \"prompts\": [ \"System: You are a helpful assistant.\\nHuman: Who directed the 2023 film Oppenheimer and what is their age in days?\" ]}\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 2:chain:RunnableSequence > 7:llm:ChatOpenAI] [3.17s] Exiting LLM run with output:\u001b[0m{ \"generations\": [ [ { \"text\": \"\", \"generation_info\": { \"finish_reason\": \"tool_calls\" }, \"type\": \"ChatGenerationChunk\", \"message\": { \"lc\": 1, \"type\": \"constructor\", \"id\": [ \"langchain\", \"schema\", \"messages\", \"AIMessageChunk\" ], \"kwargs\": { \"content\": \"\", \"example\": false, \"additional_kwargs\": { \"tool_calls\": [ { \"index\": 0, \"id\": \"call_fnfq6GjSQED4iF6lo4rxkUup\", \"function\": { \"arguments\": \"{\\\"query\\\": \\\"director of the 2023 film Oppenheimer\\\"}\", \"name\": \"tavily_search_results_json\" }, \"type\": \"function\" }, { \"index\": 1, \"id\": \"call_mwhVi6pk49f4OIo5rOWrr4TD\", \"function\": { \"arguments\": \"{\\\"query\\\": \\\"birth date of Christopher Nolan\\\"}\", \"name\": \"tavily_search_results_json\" }, \"type\": \"function\" } ] }, \"tool_call_chunks\": [ { \"name\": \"tavily_search_results_json\", \"args\": \"{\\\"query\\\": \\\"director of the 2023 film Oppenheimer\\\"}\", \"id\": \"call_fnfq6GjSQED4iF6lo4rxkUup\", \"index\": 0 }, { \"name\": \"tavily_search_results_json\", \"args\": \"{\\\"query\\\": \\\"birth date of Christopher Nolan\\\"}\", \"id\": \"call_mwhVi6pk49f4OIo5rOWrr4TD\", \"index\": 1 } ], \"response_metadata\": { \"finish_reason\": \"tool_calls\" }, \"id\": \"run-6e160323-15f9-491d-aadf-b5d337e9e2a1\", \"tool_calls\": [ { \"name\": \"tavily_search_results_json\", \"args\": { \"query\": \"director of the 2023 film Oppenheimer\" }, \"id\": \"call_fnfq6GjSQED4iF6lo4rxkUup\" }, { \"name\": \"tavily_search_results_json\", \"args\": { \"query\": \"birth date of Christopher Nolan\" }, \"id\": \"call_mwhVi6pk49f4OIo5rOWrr4TD\" } ], \"invalid_tool_calls\": [] } } } ] ], \"llm_output\": null, \"run\": null}\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 2:chain:RunnableSequence > 8:parser:ToolsAgentOutputParser] Entering Parser run with input:\u001b[0m[inputs]\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 2:chain:RunnableSequence > 8:parser:ToolsAgentOutputParser] [1ms] Exiting Parser run with output:\u001b[0m[outputs]\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 2:chain:RunnableSequence] [3.18s] Exiting Chain run with output:\u001b[0m[outputs]\u001b[32;1m\u001b[1;3m[tool/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 9:tool:tavily_search_results_json] Entering Tool run with input:\u001b[0m\"{'query': 'director of the 2023 film Oppenheimer'}\"``````outputError in ConsoleCallbackHandler.on_tool_end callback: AttributeError(\"'list' object has no attribute 'strip'\")``````output\u001b[32;1m\u001b[1;3m[tool/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 
10:tool:tavily_search_results_json] Entering Tool run with input:\u001b[0m\"{'query': 'birth date of Christopher Nolan'}\"``````outputError in ConsoleCallbackHandler.on_tool_end callback: AttributeError(\"'list' object has no attribute 'strip'\")``````output\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 11:chain:RunnableSequence] Entering Chain run with input:\u001b[0m{ \"input\": \"\"}\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 11:chain:RunnableSequence > 12:chain:RunnableAssign] Entering Chain run with input:\u001b[0m{ \"input\": \"\"}\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 11:chain:RunnableSequence > 12:chain:RunnableAssign > 13:chain:RunnableParallel] Entering Chain run with input:\u001b[0m{ \"input\": \"\"}\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 11:chain:RunnableSequence > 12:chain:RunnableAssign > 13:chain:RunnableParallel > 14:chain:RunnableLambda] Entering Chain run with input:\u001b[0m{ \"input\": \"\"}\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 11:chain:RunnableSequence > 12:chain:RunnableAssign > 13:chain:RunnableParallel > 14:chain:RunnableLambda] [1ms] Exiting Chain run with output:\u001b[0m[outputs]\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 11:chain:RunnableSequence > 12:chain:RunnableAssign > 13:chain:RunnableParallel] [4ms] Exiting Chain run with output:\u001b[0m[outputs]\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 11:chain:RunnableSequence > 12:chain:RunnableAssign] [8ms] Exiting Chain run with output:\u001b[0m[outputs]\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 11:chain:RunnableSequence > 15:prompt:ChatPromptTemplate] Entering Prompt run with input:\u001b[0m[inputs]\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 11:chain:RunnableSequence > 15:prompt:ChatPromptTemplate] [1ms] Exiting Prompt run with output:\u001b[0m[outputs]\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 11:chain:RunnableSequence > 16:llm:ChatOpenAI] Entering LLM run with input:\u001b[0m{ \"prompts\": [ \"System: You are a helpful assistant.\\nHuman: Who directed the 2023 film Oppenheimer and what is their age in days?\\nAI: \\nTool: [{\\\"url\\\": \\\"https://m.imdb.com/title/tt15398776/fullcredits/\\\", \\\"content\\\": \\\"Oppenheimer (2023) cast and crew credits, including actors, actresses, directors, writers and more. Menu. ... director of photography: behind-the-scenes Jason Gary ... best boy grip ... film loader Luc Poullain ... aerial coordinator\\\"}]\\nTool: [{\\\"url\\\": \\\"https://en.wikipedia.org/wiki/Christopher_Nolan\\\", \\\"content\\\": \\\"In early 2003, Nolan approached Warner Bros. 
with the idea of making a new Batman film, based on the character's origin story.[58] Nolan was fascinated by the notion of grounding it in a more realistic world than a comic-book fantasy.[59] He relied heavily on traditional stunts and miniature effects during filming, with minimal use of computer-generated imagery (CGI).[60] Batman Begins (2005), the biggest project Nolan had undertaken to that point,[61] was released to critical acclaim and commercial success.[62][63] Starring Christian Bale as Bruce Wayne / Batman—along with Michael Caine, Gary Oldman, Morgan Freeman and Liam Neeson—Batman Begins revived the franchise.[64][65] Batman Begins was 2005's ninth-highest-grossing film and was praised for its psychological depth and contemporary relevance;[63][66] it is cited as one of the most influential films of the 2000s.[67] Film author Ian Nathan wrote that within five years of his career, Nolan \\\\\\\"[went] from unknown to indie darling to gaining creative control over one of the biggest properties in Hollywood, and (perhaps unwittingly) fomenting the genre that would redefine the entire industry\\\\\\\".[68]\\\\nNolan directed, co-wrote and produced The Prestige (2006), an adaptation of the Christopher Priest novel about two rival 19th-century magicians.[69] He directed, wrote and edited the short film Larceny (1996),[19] which was filmed over a weekend in black and white with limited equipment and a small cast and crew.[12][20] Funded by Nolan and shot with the UCL Union Film society's equipment, it appeared at the Cambridge Film Festival in 1996 and is considered one of UCL's best shorts.[21] For unknown reasons, the film has since been removed from public view.[19] Nolan filmed a third short, Doodlebug (1997), about a man seemingly chasing an insect with his shoe, only to discover that it is a miniature of himself.[14][22] Nolan and Thomas first attempted to make a feature in the mid-1990s with Larry Mahoney, which they scrapped.[23] During this period in his career, Nolan had little to no success getting his projects off the ground, facing several rejections; he added, \\\\\\\"[T]here's a very limited pool of finance in the UK. Philosophy professor David Kyle Johnson wrote that \\\\\\\"Inception became a classic almost as soon as it was projected on silver screens\\\\\\\", praising its exploration of philosophical ideas, including leap of faith and allegory of the cave.[97] The film grossed over $836 million worldwide.[98] Nominated for eight Academy Awards—including Best Picture and Best Original Screenplay—it won Best Cinematography, Best Sound Mixing, Best Sound Editing and Best Visual Effects.[99] Nolan was nominated for a BAFTA Award and a Golden Globe Award for Best Director, among other accolades.[40]\\\\nAround the release of The Dark Knight Rises (2012), Nolan's third and final Batman film, Joseph Bevan of the British Film Institute wrote a profile on him: \\\\\\\"In the space of just over a decade, Christopher Nolan has shot from promising British indie director to undisputed master of a new brand of intelligent escapism. 
He further wrote that Nolan's body of work reflect \\\\\\\"a heterogeneity of conditions of products\\\\\\\" extending from low-budget films to lucrative blockbusters, \\\\\\\"a wide range of genres and settings\\\\\\\" and \\\\\\\"a diversity of styles that trumpet his versatility\\\\\\\".[193]\\\\nDavid Bordwell, a film theorist, wrote that Nolan has been able to blend his \\\\\\\"experimental impulses\\\\\\\" with the demands of mainstream entertainment, describing his oeuvre as \\\\\\\"experiments with cinematic time by means of techniques of subjective viewpoint and crosscutting\\\\\\\".[194] Nolan's use of practical, in-camera effects, miniatures and models, as well as shooting on celluloid film, has been highly influential in early 21st century cinema.[195][196] IndieWire wrote in 2019 that, Nolan \\\\\\\"kept a viable alternate model of big-budget filmmaking alive\\\\\\\", in an era where blockbuster filmmaking has become \\\\\\\"a largely computer-generated art form\\\\\\\".[196] Initially reluctant to make a sequel, he agreed after Warner Bros. repeatedly insisted.[78] Nolan wanted to expand on the noir quality of the first film by broadening the canvas and taking on \\\\\\\"the dynamic of a story of the city, a large crime story ... where you're looking at the police, the justice system, the vigilante, the poor people, the rich people, the criminals\\\\\\\".[79] Continuing to minimalise the use of CGI, Nolan employed high-resolution IMAX cameras, making it the first major motion picture to use this technology.[80][81]\\\"}]\" ]}\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 11:chain:RunnableSequence > 16:llm:ChatOpenAI] [20.22s] Exiting LLM run with output:\u001b[0m{ \"generations\": [ [ { \"text\": \"The 2023 film \\\"Oppenheimer\\\" was directed by Christopher Nolan.\\n\\nTo calculate Christopher Nolan's age in days, we first need his birth date, which is July 30, 1970. Let's calculate his age in days from his birth date to today's date, December 7, 2023.\\n\\n1. Calculate the total number of days from July 30, 1970, to December 7, 2023.\\n2. Christopher Nolan was born on July 30, 1970. From July 30, 1970, to July 30, 2023, is 53 years.\\n3. From July 30, 2023, to December 7, 2023, is 130 days.\\n\\nNow, calculate the total days for 53 years:\\n- Each year has 365 days, so 53 years × 365 days/year = 19,345 days.\\n- Adding the leap years from 1970 to 2023: 1972, 1976, 1980, 1984, 1988, 1992, 1996, 2000, 2004, 2008, 2012, 2016, 2020, and 2024 (up to February). This gives us 14 leap years.\\n- Total days from leap years: 14 days.\\n\\nAdding all together:\\n- Total days = 19,345 days (from years) + 14 days (from leap years) + 130 days (from July 30, 2023, to December 7, 2023) = 19,489 days.\\n\\nTherefore, as of December 7, 2023, Christopher Nolan is 19,489 days old.\", \"generation_info\": { \"finish_reason\": \"stop\" }, \"type\": \"ChatGenerationChunk\", \"message\": { \"lc\": 1, \"type\": \"constructor\", \"id\": [ \"langchain\", \"schema\", \"messages\", \"AIMessageChunk\" ], \"kwargs\": { \"content\": \"The 2023 film \\\"Oppenheimer\\\" was directed by Christopher Nolan.\\n\\nTo calculate Christopher Nolan's age in days, we first need his birth date, which is July 30, 1970. Let's calculate his age in days from his birth date to today's date, December 7, 2023.\\n\\n1. Calculate the total number of days from July 30, 1970, to December 7, 2023.\\n2. Christopher Nolan was born on July 30, 1970. 
From July 30, 1970, to July 30, 2023, is 53 years.\\n3. From July 30, 2023, to December 7, 2023, is 130 days.\\n\\nNow, calculate the total days for 53 years:\\n- Each year has 365 days, so 53 years × 365 days/year = 19,345 days.\\n- Adding the leap years from 1970 to 2023: 1972, 1976, 1980, 1984, 1988, 1992, 1996, 2000, 2004, 2008, 2012, 2016, 2020, and 2024 (up to February). This gives us 14 leap years.\\n- Total days from leap years: 14 days.\\n\\nAdding all together:\\n- Total days = 19,345 days (from years) + 14 days (from leap years) + 130 days (from July 30, 2023, to December 7, 2023) = 19,489 days.\\n\\nTherefore, as of December 7, 2023, Christopher Nolan is 19,489 days old.\", \"example\": false, \"additional_kwargs\": {}, \"tool_call_chunks\": [], \"response_metadata\": { \"finish_reason\": \"stop\" }, \"id\": \"run-1c08a44f-db70-4836-935b-417caaf422a5\", \"tool_calls\": [], \"invalid_tool_calls\": [] } } } ] ], \"llm_output\": null, \"run\": null}\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 11:chain:RunnableSequence > 17:parser:ToolsAgentOutputParser] Entering Parser run with input:\u001b[0m[inputs]\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 11:chain:RunnableSequence > 17:parser:ToolsAgentOutputParser] [2ms] Exiting Parser run with output:\u001b[0m[outputs]\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 11:chain:RunnableSequence] [20.27s] Exiting Chain run with output:\u001b[0m[outputs]\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor] [26.37s] Exiting Chain run with output:\u001b[0m{ \"output\": \"The 2023 film \\\"Oppenheimer\\\" was directed by Christopher Nolan.\\n\\nTo calculate Christopher Nolan's age in days, we first need his birth date, which is July 30, 1970. Let's calculate his age in days from his birth date to today's date, December 7, 2023.\\n\\n1. Calculate the total number of days from July 30, 1970, to December 7, 2023.\\n2. Christopher Nolan was born on July 30, 1970. From July 30, 1970, to July 30, 2023, is 53 years.\\n3. From July 30, 2023, to December 7, 2023, is 130 days.\\n\\nNow, calculate the total days for 53 years:\\n- Each year has 365 days, so 53 years × 365 days/year = 19,345 days.\\n- Adding the leap years from 1970 to 2023: 1972, 1976, 1980, 1984, 1988, 1992, 1996, 2000, 2004, 2008, 2012, 2016, 2020, and 2024 (up to February). This gives us 14 leap years.\\n- Total days from leap years: 14 days.\\n\\nAdding all together:\\n- Total days = 19,345 days (from years) + 14 days (from leap years) + 130 days (from July 30, 2023, to December 7, 2023) = 19,489 days.\\n\\nTherefore, as of December 7, 2023, Christopher Nolan is 19,489 days old.\"}\n\n {'input': 'Who directed the 2023 film Oppenheimer and what is their age in days?', 'output': 'The 2023 film \"Oppenheimer\" was directed by Christopher Nolan.\\n\\nTo calculate Christopher Nolan\\'s age in days, we first need his birth date, which is July 30, 1970. Let\\'s calculate his age in days from his birth date to today\\'s date, December 7, 2023.\\n\\n1. Calculate the total number of days from July 30, 1970, to December 7, 2023.\\n2. Christopher Nolan was born on July 30, 1970. From July 30, 1970, to July 30, 2023, is 53 years.\\n3. 
From July 30, 2023, to December 7, 2023, is 130 days.\\n\\nNow, calculate the total days for 53 years:\\n- Each year has 365 days, so 53 years × 365 days/year = 19,345 days.\\n- Adding the leap years from 1970 to 2023: 1972, 1976, 1980, 1984, 1988, 1992, 1996, 2000, 2004, 2008, 2012, 2016, 2020, and 2024 (up to February). This gives us 14 leap years.\\n- Total days from leap years: 14 days.\\n\\nAdding all together:\\n- Total days = 19,345 days (from years) + 14 days (from leap years) + 130 days (from July 30, 2023, to December 7, 2023) = 19,489 days.\\n\\nTherefore, as of December 7, 2023, Christopher Nolan is 19,489 days old.'}\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/debugging.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to create custom tools\n\n](/v0.2/docs/how_to/custom_tools/)[\n\nNext\n\nHow to load CSVs\n\n](/v0.2/docs/how_to/document_loader_csv/)\n\n* [Tracing](#tracing)\n* [`set_debug` and `set_verbose`](#set_debug-and-set_verbose)\n * [`set_verbose(True)`](#set_verbosetrue)\n * [`set_debug(True)`](#set_debugtrue)"},"last_modified":{"kind":"null"}}},{"rowIdx":1341,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/custom_tools/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to create custom tools\n\nOn this page\n\nHow to create custom tools\n==========================\n\nWhen constructing an agent, you will need to provide it with a list of `Tool`s that it can use. Besides the actual function that is called, the Tool consists of several components:\n\nAttribute\n\nType\n\nDescription\n\nname\n\nstr\n\nMust be unique within a set of tools provided to an LLM or agent.\n\ndescription\n\nstr\n\nDescribes what the tool does. Used as context by the LLM or agent.\n\nargs\\_schema\n\nPydantic BaseModel\n\nOptional but recommended, can be used to provide more information (e.g., few-shot examples) or validation for expected parameters\n\nreturn\\_direct\n\nboolean\n\nOnly relevant for agents. When True, after invoking the given tool, the agent will stop and return the result direcly to the user.\n\nLangChain provides 3 ways to create tools:\n\n1. Using [@tool decorator](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html#langchain_core.tools.tool) -- the simplest way to define a custom tool.\n2. Using [StructuredTool.from\\_function](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html#langchain_core.tools.StructuredTool.from_function) class method -- this is similar to the `@tool` decorator, but allows more configuration and specification of both sync and async implementations.\n3. 
By sub-classing from [BaseTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html) -- This is the most flexible method, it provides the largest degree of control, at the expense of more effort and code.\n\nThe `@tool` or the `StructuredTool.from_function` class method should be sufficient for most use cases.\n\ntip\n\nModels will perform better if the tools have well chosen names, descriptions and JSON schemas.\n\n@tool decorator[​](#tool-decorator \"Direct link to @tool decorator\")\n--------------------------------------------------------------------\n\nThis `@tool` decorator is the simplest way to define a custom tool. The decorator uses the function name as the tool name by default, but this can be overridden by passing a string as the first argument. Additionally, the decorator will use the function's docstring as the tool's description - so a docstring MUST be provided.\n\n from langchain_core.tools import tool@tooldef multiply(a: int, b: int) -> int: \"\"\"Multiply two numbers.\"\"\" return a * b# Let's inspect some of the attributes associated with the tool.print(multiply.name)print(multiply.description)print(multiply.args)\n\n**API Reference:**[tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html)\n\n multiplymultiply(a: int, b: int) -> int - Multiply two numbers.{'a': {'title': 'A', 'type': 'integer'}, 'b': {'title': 'B', 'type': 'integer'}}\n\nOr create an **async** implementation, like this:\n\n from langchain_core.tools import tool@toolasync def amultiply(a: int, b: int) -> int: \"\"\"Multiply two numbers.\"\"\" return a * b\n\n**API Reference:**[tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html)\n\nYou can also customize the tool name and JSON args by passing them into the tool decorator.\n\n from langchain.pydantic_v1 import BaseModel, Fieldclass CalculatorInput(BaseModel): a: int = Field(description=\"first number\") b: int = Field(description=\"second number\")@tool(\"multiplication-tool\", args_schema=CalculatorInput, return_direct=True)def multiply(a: int, b: int) -> int: \"\"\"Multiply two numbers.\"\"\" return a * b# Let's inspect some of the attributes associated with the tool.print(multiply.name)print(multiply.description)print(multiply.args)print(multiply.return_direct)\n\n multiplication-toolmultiplication-tool(a: int, b: int) -> int - Multiply two numbers.{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}True\n\nStructuredTool[​](#structuredtool \"Direct link to StructuredTool\")\n------------------------------------------------------------------\n\nThe `StrurcturedTool.from_function` class method provides a bit more configurability than the `@tool` decorator, without requiring much additional code.\n\n from langchain_core.tools import StructuredTooldef multiply(a: int, b: int) -> int: \"\"\"Multiply two numbers.\"\"\" return a * basync def amultiply(a: int, b: int) -> int: \"\"\"Multiply two numbers.\"\"\" return a * bcalculator = StructuredTool.from_function(func=multiply, coroutine=amultiply)print(calculator.invoke({\"a\": 2, \"b\": 3}))print(await calculator.ainvoke({\"a\": 2, \"b\": 5}))\n\n**API Reference:**[StructuredTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html)\n\n 610\n\nTo configure it:\n\n class CalculatorInput(BaseModel): a: int = Field(description=\"first number\") b: int = Field(description=\"second 
number\")def multiply(a: int, b: int) -> int: \"\"\"Multiply two numbers.\"\"\" return a * bcalculator = StructuredTool.from_function( func=multiply, name=\"Calculator\", description=\"multiply numbers\", args_schema=CalculatorInput, return_direct=True, # coroutine= ... <- you can specify an async method if desired as well)print(calculator.invoke({\"a\": 2, \"b\": 3}))print(calculator.name)print(calculator.description)print(calculator.args)\n\n 6CalculatorCalculator(a: int, b: int) -> int - multiply numbers{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n\nSubclass BaseTool[​](#subclass-basetool \"Direct link to Subclass BaseTool\")\n---------------------------------------------------------------------------\n\nYou can define a custom tool by sub-classing from `BaseTool`. This provides maximal control over the tool definition, but requires writing more code.\n\n from typing import Optional, Typefrom langchain.pydantic_v1 import BaseModelfrom langchain_core.callbacks import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun,)from langchain_core.tools import BaseToolclass CalculatorInput(BaseModel): a: int = Field(description=\"first number\") b: int = Field(description=\"second number\")class CustomCalculatorTool(BaseTool): name = \"Calculator\" description = \"useful for when you need to answer questions about math\" args_schema: Type[BaseModel] = CalculatorInput return_direct: bool = True def _run( self, a: int, b: int, run_manager: Optional[CallbackManagerForToolRun] = None ) -> str: \"\"\"Use the tool.\"\"\" return a * b async def _arun( self, a: int, b: int, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: \"\"\"Use the tool asynchronously.\"\"\" # If the calculation is cheap, you can just delegate to the sync implementation # as shown below. # If the sync calculation is expensive, you should delete the entire _arun method. # LangChain will automatically provide a better implementation that will # kick off the task in a thread to make sure it doesn't block other async code. 
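        # Note: run_manager.get_sync() returns the sync counterpart of the async
        # callback manager, so any callbacks registered for this run still fire
        # inside the sync _run implementation.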
        return self._run(a, b, run_manager=run_manager.get_sync())

**API Reference:**[AsyncCallbackManagerForToolRun](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.AsyncCallbackManagerForToolRun.html) | [CallbackManagerForToolRun](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.CallbackManagerForToolRun.html) | [BaseTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html)

    multiply = CustomCalculatorTool()
    print(multiply.name)
    print(multiply.description)
    print(multiply.args)
    print(multiply.return_direct)
    print(multiply.invoke({"a": 2, "b": 3}))
    print(await multiply.ainvoke({"a": 2, "b": 3}))

    Calculator
    useful for when you need to answer questions about math
    {'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}
    True
    6
    6

How to create async tools
-------------------------

LangChain Tools implement the [Runnable interface 🏃](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html).

All Runnables expose the `invoke` and `ainvoke` methods (as well as other methods like `batch`, `abatch`, `astream`, etc.).

So even if you only provide a sync implementation of a tool, you can still use the `ainvoke` interface, but there are some important things to know:

* By default, LangChain provides an async implementation that assumes the function is expensive to compute, so it delegates execution to another thread.
* If you're working in an async codebase, you should create async tools rather than sync tools, to avoid incurring a small overhead due to that thread.
* If you need both sync and async implementations, use `StructuredTool.from_function` or sub-class from `BaseTool`.
* If you implement both sync and async, and the sync code is fast to run, override the default LangChain async implementation and simply call the sync code.
* You CANNOT and SHOULD NOT use the sync `invoke` with an `async` tool.

    from langchain_core.tools import StructuredTool

    def multiply(a: int, b: int) -> int:
        """Multiply two numbers."""
        return a * b

    calculator = StructuredTool.from_function(func=multiply)

    print(calculator.invoke({"a": 2, "b": 3}))
    print(
        await calculator.ainvoke({"a": 2, "b": 5})
    )  # Uses the default LangChain async implementation; incurs a small overhead

**API Reference:**[StructuredTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html)

    6
    10

    from langchain_core.tools import StructuredTool

    def multiply(a: int, b: int) -> int:
        """Multiply two numbers."""
        return a * b

    async def amultiply(a: int, b: int) -> int:
        """Multiply two numbers."""
        return a * b

    calculator = StructuredTool.from_function(func=multiply, coroutine=amultiply)

    print(calculator.invoke({"a": 2, "b": 3}))
    print(
        await calculator.ainvoke({"a": 2, "b": 5})
    )  # Uses the provided amultiply without additional overhead

**API Reference:**[StructuredTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html)

    6
    10

You should not and cannot use `.invoke` when providing only an async definition.

    @tool
    async def multiply(a: int, b: int) -> int:
        """Multiply two numbers."""
        return a * b

    try:
        multiply.invoke({"a": 2, "b": 
3})except NotImplementedError: print(\"Raised not implemented error. You should not be doing this.\")\n\n Raised not implemented error. You should not be doing this.\n\nHandling Tool Errors[​](#handling-tool-errors \"Direct link to Handling Tool Errors\")\n------------------------------------------------------------------------------------\n\nIf you're using tools with agents, you will likely need an error handling strategy, so the agent can recover from the error and continue execution.\n\nA simple strategy is to throw a `ToolException` from inside the tool and specify an error handler using `handle_tool_error`.\n\nWhen the error handler is specified, the exception will be caught and the error handler will decide which output to return from the tool.\n\nYou can set `handle_tool_error` to `True`, a string value, or a function. If it's a function, the function should take a `ToolException` as a parameter and return a value.\n\nPlease note that only raising a `ToolException` won't be effective. You need to first set the `handle_tool_error` of the tool because its default value is `False`.\n\n from langchain_core.tools import ToolExceptiondef get_weather(city: str) -> int: \"\"\"Get weather for the given city.\"\"\" raise ToolException(f\"Error: There is no city by the name of {city}.\")\n\n**API Reference:**[ToolException](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.ToolException.html)\n\nHere's an example with the default `handle_tool_error=True` behavior.\n\n get_weather_tool = StructuredTool.from_function( func=get_weather, handle_tool_error=True,)get_weather_tool.invoke({\"city\": \"foobar\"})\n\n 'Error: There is no city by the name of foobar.'\n\nWe can set `handle_tool_error` to a string that will always be returned.\n\n get_weather_tool = StructuredTool.from_function( func=get_weather, handle_tool_error=\"There is no such city, but it's probably above 0K there!\",)get_weather_tool.invoke({\"city\": \"foobar\"})\n\n \"There is no such city, but it's probably above 0K there!\"\n\nHandling the error using a function:\n\n def _handle_error(error: ToolException) -> str: return f\"The following errors occurred during tool execution: `{error.args[0]}`\"get_weather_tool = StructuredTool.from_function( func=get_weather, handle_tool_error=_handle_error,)get_weather_tool.invoke({\"city\": \"foobar\"})\n\n 'The following errors occurred during tool execution: `Error: There is no city by the name of foobar.`'\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/custom_tools.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nCustom Retriever\n\n](/v0.2/docs/how_to/custom_retriever/)[\n\nNext\n\nHow to debug your LLM apps\n\n](/v0.2/docs/how_to/debugging/)\n\n* [@tool decorator](#tool-decorator)\n* [StructuredTool](#structuredtool)\n* [Subclass BaseTool](#subclass-basetool)\n* [How to create async tools](#how-to-create-async-tools)\n* [Handling Tool Errors](#handling-tool-errors)"},"last_modified":{"kind":"null"}}},{"rowIdx":1342,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/callbacks_async/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to use callbacks in 
async environments\n\nOn this page\n\nHow to use callbacks in async environments\n==========================================\n\nPrerequisites\n\nThis guide assumes familiarity with the following concepts:\n\n* [Callbacks](/v0.2/docs/concepts/#callbacks)\n* [Custom callback handlers](/v0.2/docs/how_to/custom_callbacks/)\n\nIf you are planning to use the async APIs, it is recommended to use and extend [`AsyncCallbackHandler`](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) to avoid blocking the event.\n\ndanger\n\nIf you use a sync `CallbackHandler` while using an async method to run your LLM / Chain / Tool / Agent, it will still work. However, under the hood, it will be called with [`run_in_executor`](https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.run_in_executor) which can cause issues if your `CallbackHandler` is not thread-safe.\n\ndanger\n\nIf you're on `python<=3.10`, you need to remember to propagate `config` or `callbacks` when invoking other `runnable` from within a `RunnableLambda`, `RunnableGenerator` or `@tool`. If you do not do this, the callbacks will not be propagated to the child runnables being invoked.\n\n import asynciofrom typing import Any, Dict, Listfrom langchain_anthropic import ChatAnthropicfrom langchain_core.callbacks import AsyncCallbackHandler, BaseCallbackHandlerfrom langchain_core.messages import HumanMessagefrom langchain_core.outputs import LLMResultclass MyCustomSyncHandler(BaseCallbackHandler): def on_llm_new_token(self, token: str, **kwargs) -> None: print(f\"Sync handler being called in a `thread_pool_executor`: token: {token}\")class MyCustomAsyncHandler(AsyncCallbackHandler): \"\"\"Async callback handler that can be used to handle callbacks from langchain.\"\"\" async def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: \"\"\"Run when chain starts running.\"\"\" print(\"zzzz....\") await asyncio.sleep(0.3) class_name = serialized[\"name\"] print(\"Hi! I just woke up. Your llm is starting\") async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: \"\"\"Run when chain ends running.\"\"\" print(\"zzzz....\") await asyncio.sleep(0.3) print(\"Hi! I just woke up. Your llm is ending\")# To enable streaming, we pass in `streaming=True` to the ChatModel constructor# Additionally, we pass in a list with our custom handlerchat = ChatAnthropic( model=\"claude-3-sonnet-20240229\", max_tokens=25, streaming=True, callbacks=[MyCustomSyncHandler(), MyCustomAsyncHandler()],)await chat.agenerate([[HumanMessage(content=\"Tell me a joke\")]])\n\n**API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) | [AsyncCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) | [BaseCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [LLMResult](https://api.python.langchain.com/en/latest/outputs/langchain_core.outputs.llm_result.LLMResult.html)\n\n zzzz....Hi! I just woke up. 
Your llm is starting
    Sync handler being called in a `thread_pool_executor`: token: Here
    Sync handler being called in a `thread_pool_executor`: token: 's
    Sync handler being called in a `thread_pool_executor`: token: a
    Sync handler being called in a `thread_pool_executor`: token: little
    Sync handler being called in a `thread_pool_executor`: token: joke
    Sync handler being called in a `thread_pool_executor`: token: for
    Sync handler being called in a `thread_pool_executor`: token: you
    Sync handler being called in a `thread_pool_executor`: token: :
    Sync handler being called in a `thread_pool_executor`: token: Why
    Sync handler being called in a `thread_pool_executor`: token: can
    Sync handler being called in a `thread_pool_executor`: token: 't
    Sync handler being called in a `thread_pool_executor`: token: a
    Sync handler being called in a `thread_pool_executor`: token: bicycle
    Sync handler being called in a `thread_pool_executor`: token: stan
    Sync handler being called in a `thread_pool_executor`: token: d up
    Sync handler being called in a `thread_pool_executor`: token: by
    Sync handler being called in a `thread_pool_executor`: token: itself
    Sync handler being called in a `thread_pool_executor`: token: ?
    Sync handler being called in a `thread_pool_executor`: token: Because
    Sync handler being called in a `thread_pool_executor`: token: it
    Sync handler being called in a `thread_pool_executor`: token: 's
    Sync handler being called in a `thread_pool_executor`: token: two
    Sync handler being called in a `thread_pool_executor`: token: -
    Sync handler being called in a `thread_pool_executor`: token: tire
    zzzz....
    Hi! I just woke up. Your llm is ending

    LLMResult(generations=[[ChatGeneration(text="Here's a little joke for you:\n\nWhy can't a bicycle stand up by itself? Because it's two-tire", message=AIMessage(content="Here's a little joke for you:\n\nWhy can't a bicycle stand up by itself? Because it's two-tire", id='run-8afc89e8-02c0-4522-8480-d96977240bd4-0'))]], llm_output={}, run=[RunInfo(run_id=UUID('8afc89e8-02c0-4522-8480-d96977240bd4'))])

Next steps
----------

You've now learned how to create your own custom callback handlers and use them safely in async environments.

Next, check out the other how-to guides in this section, such as [how to attach callbacks to a runnable](/v0.2/docs/how_to/callbacks_attach/).

How to load CSVs
================

A [comma-separated values (CSV)](https://en.wikipedia.org/wiki/Comma-separated_values) file is a delimited text file that uses a comma to separate values. Each line of the file is a data record.
Each record consists of one or more fields, separated by commas.\n\nLangChain implements a [CSV Loader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.csv_loader.CSVLoader.html) that will load CSV files into a sequence of [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects. Each row of the CSV file is translated to one document.\n\n from langchain_community.document_loaders.csv_loader import CSVLoaderfile_path = ( \"../../../docs/integrations/document_loaders/example_data/mlb_teams_2012.csv\")loader = CSVLoader(file_path=file_path)data = loader.load()for record in data[:2]: print(record)\n\n**API Reference:**[CSVLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.csv_loader.CSVLoader.html)\n\n page_content='Team: Nationals\\n\"Payroll (millions)\": 81.34\\n\"Wins\": 98' metadata={'source': '../../../docs/integrations/document_loaders/example_data/mlb_teams_2012.csv', 'row': 0}page_content='Team: Reds\\n\"Payroll (millions)\": 82.20\\n\"Wins\": 97' metadata={'source': '../../../docs/integrations/document_loaders/example_data/mlb_teams_2012.csv', 'row': 1}\n\nCustomizing the CSV parsing and loading[​](#customizing-the-csv-parsing-and-loading \"Direct link to Customizing the CSV parsing and loading\")\n---------------------------------------------------------------------------------------------------------------------------------------------\n\n`CSVLoader` will accept a `csv_args` kwarg that supports customization of arguments passed to Python's `csv.DictReader`. See the [csv module](https://docs.python.org/3/library/csv.html) documentation for more information of what csv args are supported.\n\n loader = CSVLoader( file_path=file_path, csv_args={ \"delimiter\": \",\", \"quotechar\": '\"', \"fieldnames\": [\"MLB Team\", \"Payroll in millions\", \"Wins\"], },)data = loader.load()for record in data[:2]: print(record)\n\n page_content='MLB Team: Team\\nPayroll in millions: \"Payroll (millions)\"\\nWins: \"Wins\"' metadata={'source': '../../../docs/integrations/document_loaders/example_data/mlb_teams_2012.csv', 'row': 0}page_content='MLB Team: Nationals\\nPayroll in millions: 81.34\\nWins: 98' metadata={'source': '../../../docs/integrations/document_loaders/example_data/mlb_teams_2012.csv', 'row': 1}\n\nSpecify a column to identify the document source[​](#specify-a-column-to-identify-the-document-source \"Direct link to Specify a column to identify the document source\")\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\nThe `\"source\"` key on [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) metadata can be set using a column of the CSV. Use the `source_column` argument to specify a source for the document created from each row. 
Otherwise `file_path` will be used as the source for all documents created from the CSV file.\n\nThis is useful when using documents loaded from CSV files for chains that answer questions using sources.\n\n loader = CSVLoader(file_path=file_path, source_column=\"Team\")data = loader.load()for record in data[:2]: print(record)\n\n page_content='Team: Nationals\\n\"Payroll (millions)\": 81.34\\n\"Wins\": 98' metadata={'source': 'Nationals', 'row': 0}page_content='Team: Reds\\n\"Payroll (millions)\": 82.20\\n\"Wins\": 97' metadata={'source': 'Reds', 'row': 1}\n\nLoad from a string[​](#load-from-a-string \"Direct link to Load from a string\")\n------------------------------------------------------------------------------\n\nPython's `tempfile` can be used when working with CSV strings directly.\n\n import tempfilefrom io import StringIOstring_data = \"\"\"\"Team\", \"Payroll (millions)\", \"Wins\"\"Nationals\", 81.34, 98\"Reds\", 82.20, 97\"Yankees\", 197.96, 95\"Giants\", 117.62, 94\"\"\".strip()with tempfile.NamedTemporaryFile(delete=False, mode=\"w+\") as temp_file: temp_file.write(string_data) temp_file_path = temp_file.nameloader = CSVLoader(file_path=temp_file_path)loader.load()for record in data[:2]: print(record)\n\n page_content='Team: Nationals\\n\"Payroll (millions)\": 81.34\\n\"Wins\": 98' metadata={'source': 'Nationals', 'row': 0}page_content='Team: Reds\\n\"Payroll (millions)\": 82.20\\n\"Wins\": 97' metadata={'source': 'Reds', 'row': 1}\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/document_loader_csv.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to debug your LLM apps\n\n](/v0.2/docs/how_to/debugging/)[\n\nNext\n\nHow to load documents from a directory\n\n](/v0.2/docs/how_to/document_loader_directory/)\n\n* [Customizing the CSV parsing and loading](#customizing-the-csv-parsing-and-loading)\n* [Specify a column to identify the document source](#specify-a-column-to-identify-the-document-source)\n* [Load from a string](#load-from-a-string)"},"last_modified":{"kind":"null"}}},{"rowIdx":1344,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/document_loader_directory/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to load documents from a directory\n\nOn this page\n\nHow to load documents from a directory\n======================================\n\nLangChain's [DirectoryLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.directory.DirectoryLoader.html) implements functionality for reading files from disk into LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects. 
Here we demonstrate:\n\n* How to load from a filesystem, including use of wildcard patterns;\n* How to use multithreading for file I/O;\n* How to use custom loader classes to parse specific file types (e.g., code);\n* How to handle errors, such as those due to decoding.\n\n from langchain_community.document_loaders import DirectoryLoader\n\n**API Reference:**[DirectoryLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.directory.DirectoryLoader.html)\n\n`DirectoryLoader` accepts a `loader_cls` kwarg, which defaults to [UnstructuredLoader](/v0.2/docs/integrations/document_loaders/unstructured_file/). [Unstructured](https://unstructured-io.github.io/unstructured/) supports parsing for a number of formats, such as PDF and HTML. Here we use it to read in a markdown (.md) file.\n\nWe can use the `glob` parameter to control which files to load. Note that here it doesn't load the `.rst` file or the `.html` files.\n\n loader = DirectoryLoader(\"../\", glob=\"**/*.md\")docs = loader.load()len(docs)\n\n 20\n\n print(docs[0].page_content[:100])\n\n SecurityLangChain has a large ecosystem of integrations with various external resources like local\n\nShow a progress bar[​](#show-a-progress-bar \"Direct link to Show a progress bar\")\n---------------------------------------------------------------------------------\n\nBy default a progress bar will not be shown. To show a progress bar, install the `tqdm` library (e.g. `pip install tqdm`), and set the `show_progress` parameter to `True`.\n\n loader = DirectoryLoader(\"../\", glob=\"**/*.md\", show_progress=True)docs = loader.load()\n\n 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 20/20 [00:00<00:00, 54.56it/s]\n\nUse multithreading[​](#use-multithreading \"Direct link to Use multithreading\")\n------------------------------------------------------------------------------\n\nBy default the loading happens in one thread. In order to utilize several threads set the `use_multithreading` flag to true.\n\n loader = DirectoryLoader(\"../\", glob=\"**/*.md\", use_multithreading=True)docs = loader.load()\n\nChange loader class[​](#change-loader-class \"Direct link to Change loader class\")\n---------------------------------------------------------------------------------\n\nBy default this uses the `UnstructuredLoader` class. To customize the loader, specify the loader class in the `loader_cls` kwarg. 
Below we show an example using [TextLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.text.TextLoader.html):\n\n from langchain_community.document_loaders import TextLoaderloader = DirectoryLoader(\"../\", glob=\"**/*.md\", loader_cls=TextLoader)docs = loader.load()\n\n**API Reference:**[TextLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.text.TextLoader.html)\n\n print(docs[0].page_content[:100])\n\n # SecurityLangChain has a large ecosystem of integrations with various external resources like loc\n\nNotice that while the `UnstructuredLoader` parses Markdown headers, `TextLoader` does not.\n\nIf you need to load Python source code files, use the `PythonLoader`:\n\n from langchain_community.document_loaders import PythonLoaderloader = DirectoryLoader(\"../../../../../\", glob=\"**/*.py\", loader_cls=PythonLoader)\n\n**API Reference:**[PythonLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.python.PythonLoader.html)\n\nAuto-detect file encodings with TextLoader[​](#auto-detect-file-encodings-with-textloader \"Direct link to Auto-detect file encodings with TextLoader\")\n------------------------------------------------------------------------------------------------------------------------------------------------------\n\n`DirectoryLoader` can help manage errors due to variations in file encodings. Below we will attempt to load in a collection of files, one of which includes non-UTF8 encodings.\n\n path = \"../../../../libs/langchain/tests/unit_tests/examples/\"loader = DirectoryLoader(path, glob=\"**/*.txt\", loader_cls=TextLoader)\n\n### A. Default Behavior[​](#a-default-behavior \"Direct link to A. 
Default Behavior\")\n\nBy default we raise an error:\n\n loader.load()\n\n Error loading file ../../../../libs/langchain/tests/unit_tests/examples/example-non-utf8.txt\n\n ---------------------------------------------------------------------------``````outputUnicodeDecodeError Traceback (most recent call last)``````outputFile ~/repos/langchain/libs/community/langchain_community/document_loaders/text.py:43, in TextLoader.lazy_load(self) 42 with open(self.file_path, encoding=self.encoding) as f:---> 43 text = f.read() 44 except UnicodeDecodeError as e:``````outputFile ~/.pyenv/versions/3.10.4/lib/python3.10/codecs.py:322, in BufferedIncrementalDecoder.decode(self, input, final) 321 data = self.buffer + input--> 322 (result, consumed) = self._buffer_decode(data, self.errors, final) 323 # keep undecoded input until the next call``````outputUnicodeDecodeError: 'utf-8' codec can't decode byte 0xca in position 0: invalid continuation byte``````outputThe above exception was the direct cause of the following exception:``````outputRuntimeError Traceback (most recent call last)``````outputCell In[10], line 1----> 1 loader.load()``````outputFile ~/repos/langchain/libs/community/langchain_community/document_loaders/directory.py:117, in DirectoryLoader.load(self) 115 def load(self) -> List[Document]: 116 \"\"\"Load documents.\"\"\"--> 117 return list(self.lazy_load())``````outputFile ~/repos/langchain/libs/community/langchain_community/document_loaders/directory.py:182, in DirectoryLoader.lazy_load(self) 180 else: 181 for i in items:--> 182 yield from self._lazy_load_file(i, p, pbar) 184 if pbar: 185 pbar.close()``````outputFile ~/repos/langchain/libs/community/langchain_community/document_loaders/directory.py:220, in DirectoryLoader._lazy_load_file(self, item, path, pbar) 218 else: 219 logger.error(f\"Error loading file {str(item)}\")--> 220 raise e 221 finally: 222 if pbar:``````outputFile ~/repos/langchain/libs/community/langchain_community/document_loaders/directory.py:210, in DirectoryLoader._lazy_load_file(self, item, path, pbar) 208 loader = self.loader_cls(str(item), **self.loader_kwargs) 209 try:--> 210 for subdoc in loader.lazy_load(): 211 yield subdoc 212 except NotImplementedError:``````outputFile ~/repos/langchain/libs/community/langchain_community/document_loaders/text.py:56, in TextLoader.lazy_load(self) 54 continue 55 else:---> 56 raise RuntimeError(f\"Error loading {self.file_path}\") from e 57 except Exception as e: 58 raise RuntimeError(f\"Error loading {self.file_path}\") from e``````outputRuntimeError: Error loading ../../../../libs/langchain/tests/unit_tests/examples/example-non-utf8.txt\n\nThe file `example-non-utf8.txt` uses a different encoding, so the `load()` function fails with a helpful message indicating which file failed decoding.\n\nWith the default behavior of `TextLoader` any failure to load any of the documents will fail the whole loading process and no documents are loaded.\n\n### B. Silent fail[​](#b-silent-fail \"Direct link to B. 
Silent fail\")\n\nWe can pass the parameter `silent_errors` to the `DirectoryLoader` to skip the files which could not be loaded and continue the load process.\n\n loader = DirectoryLoader( path, glob=\"**/*.txt\", loader_cls=TextLoader, silent_errors=True)docs = loader.load()\n\n Error loading file ../../../../libs/langchain/tests/unit_tests/examples/example-non-utf8.txt: Error loading ../../../../libs/langchain/tests/unit_tests/examples/example-non-utf8.txt\n\n doc_sources = [doc.metadata[\"source\"] for doc in docs]doc_sources\n\n ['../../../../libs/langchain/tests/unit_tests/examples/example-utf8.txt']\n\n### C. Auto detect encodings[​](#c-auto-detect-encodings \"Direct link to C. Auto detect encodings\")\n\nWe can also ask `TextLoader` to auto detect the file encoding before failing, by passing the `autodetect_encoding` to the loader class.\n\n text_loader_kwargs = {\"autodetect_encoding\": True}loader = DirectoryLoader( path, glob=\"**/*.txt\", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)docs = loader.load()\n\n doc_sources = [doc.metadata[\"source\"] for doc in docs]doc_sources\n\n ['../../../../libs/langchain/tests/unit_tests/examples/example-utf8.txt', '../../../../libs/langchain/tests/unit_tests/examples/example-non-utf8.txt']\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/document_loader_directory.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to load CSVs\n\n](/v0.2/docs/how_to/document_loader_csv/)[\n\nNext\n\nHow to load HTML\n\n](/v0.2/docs/how_to/document_loader_html/)\n\n* [Show a progress bar](#show-a-progress-bar)\n* [Use multithreading](#use-multithreading)\n* [Change loader class](#change-loader-class)\n* [Auto-detect file encodings with TextLoader](#auto-detect-file-encodings-with-textloader)\n * [A. Default Behavior](#a-default-behavior)\n * [B. Silent fail](#b-silent-fail)\n * [C. Auto detect encodings](#c-auto-detect-encodings)"},"last_modified":{"kind":"null"}}},{"rowIdx":1345,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/callbacks_attach/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to attach callbacks to a runnable\n\nOn this page\n\nHow to attach callbacks to a runnable\n=====================================\n\nPrerequisites\n\nThis guide assumes familiarity with the following concepts:\n\n* [Callbacks](/v0.2/docs/concepts/#callbacks)\n* [Custom callback handlers](/v0.2/docs/how_to/custom_callbacks/)\n* [Chaining runnables](/v0.2/docs/how_to/sequence/)\n* [Attach runtime arguments to a Runnable](/v0.2/docs/how_to/binding/)\n\nIf you are composing a chain of runnables and want to reuse callbacks across multiple executions, you can attach callbacks with the [`.with_config()`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config) method. This saves you the need to pass callbacks in each time you invoke the chain.\n\ninfo\n\n`with_config()` binds a configuration which will be interpreted as **runtime** configuration. 
So these callbacks will propagate to all child components.\n\nHere's an example:\n\n from typing import Any, Dict, Listfrom langchain_anthropic import ChatAnthropicfrom langchain_core.callbacks import BaseCallbackHandlerfrom langchain_core.messages import BaseMessagefrom langchain_core.outputs import LLMResultfrom langchain_core.prompts import ChatPromptTemplateclass LoggingHandler(BaseCallbackHandler): def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs ) -> None: print(\"Chat model started\") def on_llm_end(self, response: LLMResult, **kwargs) -> None: print(f\"Chat model ended, response: {response}\") def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs ) -> None: print(f\"Chain {serialized.get('name')} started\") def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None: print(f\"Chain ended, outputs: {outputs}\")callbacks = [LoggingHandler()]llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")prompt = ChatPromptTemplate.from_template(\"What is 1 + {number}?\")chain = prompt | llmchain_with_callbacks = chain.with_config(callbacks=callbacks)chain_with_callbacks.invoke({\"number\": \"2\"})\n\n**API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) | [BaseCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html) | [BaseMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.base.BaseMessage.html) | [LLMResult](https://api.python.langchain.com/en/latest/outputs/langchain_core.outputs.llm_result.LLMResult.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html)\n\n Chain RunnableSequence startedChain ChatPromptTemplate startedChain ended, outputs: messages=[HumanMessage(content='What is 1 + 2?')]Chat model startedChat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-d6bcfd72-9c94-466d-bac0-f39e456ad6e3-0'))]] llm_output={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=NoneChain ended, outputs: content='1 + 2 = 3' response_metadata={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} id='run-d6bcfd72-9c94-466d-bac0-f39e456ad6e3-0'\n\n AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-d6bcfd72-9c94-466d-bac0-f39e456ad6e3-0')\n\nThe bound callbacks will run for all nested module runs.\n\nNext steps[​](#next-steps \"Direct link to Next steps\")\n------------------------------------------------------\n\nYou've now learned how to attach callbacks to a chain.\n\nNext, check out the other how-to guides in this section, such as how to [pass callbacks in at runtime](/v0.2/docs/how_to/callbacks_runtime/).\n\n[Edit this 
page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/callbacks_attach.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to use callbacks in async environments\n\n](/v0.2/docs/how_to/callbacks_async/)[\n\nNext\n\nHow to propagate callbacks constructor\n\n](/v0.2/docs/how_to/callbacks_constructor/)\n\n* [Next steps](#next-steps)"},"last_modified":{"kind":"null"}}},{"rowIdx":1346,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/document_loader_html/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to load HTML\n\nOn this page\n\nHow to load HTML\n================\n\nThe HyperText Markup Language or [HTML](https://en.wikipedia.org/wiki/HTML) is the standard markup language for documents designed to be displayed in a web browser.\n\nThis covers how to load `HTML` documents into a LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects that we can use downstream.\n\nParsing HTML files often requires specialized tools. Here we demonstrate parsing via [Unstructured](https://unstructured-io.github.io/unstructured/) and [BeautifulSoup4](https://beautiful-soup-4.readthedocs.io/en/latest/), which can be installed via pip. Head over to the integrations page to find integrations with additional services, such as [Azure AI Document Intelligence](/v0.2/docs/integrations/document_loaders/azure_document_intelligence/) or [FireCrawl](/v0.2/docs/integrations/document_loaders/firecrawl/).\n\nLoading HTML with Unstructured[​](#loading-html-with-unstructured \"Direct link to Loading HTML with Unstructured\")\n------------------------------------------------------------------------------------------------------------------\n\n %pip install \"unstructured[html]\"\n\n from langchain_community.document_loaders import UnstructuredHTMLLoaderfile_path = \"../../../docs/integrations/document_loaders/example_data/fake-content.html\"loader = UnstructuredHTMLLoader(file_path)data = loader.load()print(data)\n\n**API Reference:**[UnstructuredHTMLLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.html.UnstructuredHTMLLoader.html)\n\n [Document(page_content='My First Heading\\n\\nMy first paragraph.', metadata={'source': '../../../docs/integrations/document_loaders/example_data/fake-content.html'})]\n\nLoading HTML with BeautifulSoup4[​](#loading-html-with-beautifulsoup4 \"Direct link to Loading HTML with BeautifulSoup4\")\n------------------------------------------------------------------------------------------------------------------------\n\nWe can also use `BeautifulSoup4` to load HTML documents using the `BSHTMLLoader`. 
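Under the hood the `BSHTMLLoader` is essentially a thin wrapper around BeautifulSoup: it parses the file, takes the page text and the `<title>` tag, and wraps them in a `Document`. As a rough hand-rolled sketch of the same idea (not the loader's exact implementation, and reusing the `file_path` defined above):

    from bs4 import BeautifulSoup
    from langchain_core.documents import Document

    # Parse the HTML file and pull out the visible text and the <title> tag.
    with open(file_path) as f:
        soup = BeautifulSoup(f, "html.parser")

    doc = Document(
        page_content=soup.get_text(),
        metadata={
            "source": file_path,
            "title": soup.title.string if soup.title else "",
        },
    )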
This will extract the text from the HTML into `page_content`, and the page title as `title` into `metadata`.\n\n %pip install bs4\n\n from langchain_community.document_loaders import BSHTMLLoaderloader = BSHTMLLoader(file_path)data = loader.load()print(data)\n\n**API Reference:**[BSHTMLLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.html_bs.BSHTMLLoader.html)\n\n [Document(page_content='\\nTest Title\\n\\n\\nMy First Heading\\nMy first paragraph.\\n\\n\\n', metadata={'source': '../../../docs/integrations/document_loaders/example_data/fake-content.html', 'title': 'Test Title'})]\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/document_loader_html.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to load documents from a directory\n\n](/v0.2/docs/how_to/document_loader_directory/)[\n\nNext\n\nHow to load JSON\n\n](/v0.2/docs/how_to/document_loader_json/)\n\n* [Loading HTML with Unstructured](#loading-html-with-unstructured)\n* [Loading HTML with BeautifulSoup4](#loading-html-with-beautifulsoup4)"},"last_modified":{"kind":"null"}}},{"rowIdx":1347,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/callbacks_constructor/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to propagate callbacks constructor\n\nOn this page\n\nHow to propagate callbacks constructor\n======================================\n\nPrerequisites\n\nThis guide assumes familiarity with the following concepts:\n\n* [Callbacks](/v0.2/docs/concepts/#callbacks)\n* [Custom callback handlers](/v0.2/docs/how_to/custom_callbacks/)\n\nMost LangChain modules allow you to pass `callbacks` directly into the constructor (i.e., initializer). In this case, the callbacks will only be called for that instance (and any nested runs).\n\ndanger\n\nConstructor callbacks are scoped only to the object they are defined on. They are **not** inherited by children of the object. 
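In other words, a handler attached to a child object (such as the chat model below) sees only that child's events, while a handler bound with `.with_config()` or passed at invocation time is inherited by every nested run. A minimal sketch of the contrast, using the built-in `StdOutCallbackHandler` purely for illustration:

    from langchain_anthropic import ChatAnthropic
    from langchain_core.callbacks import StdOutCallbackHandler
    from langchain_core.prompts import ChatPromptTemplate

    handler = StdOutCallbackHandler()

    # Constructor callbacks: scoped to the chat model only, so chain-level
    # events from the surrounding sequence never reach this handler.
    llm = ChatAnthropic(model="claude-3-sonnet-20240229", callbacks=[handler])
    chain = ChatPromptTemplate.from_template("What is 1 + {number}?") | llm

    # Runtime callbacks: bound to the whole chain, so the handler is inherited
    # by the prompt, the model, and any other nested runnables.
    chain_with_callbacks = chain.with_config(callbacks=[handler])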
This can lead to confusing behavior, and it's generally better to pass callbacks as a run time argument.\n\nHere's an example:\n\n from typing import Any, Dict, Listfrom langchain_anthropic import ChatAnthropicfrom langchain_core.callbacks import BaseCallbackHandlerfrom langchain_core.messages import BaseMessagefrom langchain_core.outputs import LLMResultfrom langchain_core.prompts import ChatPromptTemplateclass LoggingHandler(BaseCallbackHandler): def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs ) -> None: print(\"Chat model started\") def on_llm_end(self, response: LLMResult, **kwargs) -> None: print(f\"Chat model ended, response: {response}\") def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs ) -> None: print(f\"Chain {serialized.get('name')} started\") def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None: print(f\"Chain ended, outputs: {outputs}\")callbacks = [LoggingHandler()]llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", callbacks=callbacks)prompt = ChatPromptTemplate.from_template(\"What is 1 + {number}?\")chain = prompt | llmchain.invoke({\"number\": \"2\"})\n\n**API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) | [BaseCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html) | [BaseMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.base.BaseMessage.html) | [LLMResult](https://api.python.langchain.com/en/latest/outputs/langchain_core.outputs.llm_result.LLMResult.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html)\n\n Chat model startedChat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-2d7fdf2a-7405-4e17-97c0-67e6b2a65305-0'))]] llm_output={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=None\n\n AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-2d7fdf2a-7405-4e17-97c0-67e6b2a65305-0')\n\nYou can see that we only see events from the chat model run - no chain events from the prompt or broader chain.\n\nNext steps[​](#next-steps \"Direct link to Next steps\")\n------------------------------------------------------\n\nYou've now learned how to pass callbacks into a constructor.\n\nNext, check out the other how-to guides in this section, such as how to [pass callbacks at runtime](/v0.2/docs/how_to/callbacks_runtime/).\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/callbacks_constructor.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on 
GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to attach callbacks to a runnable\n\n](/v0.2/docs/how_to/callbacks_attach/)[\n\nNext\n\nHow to pass callbacks in at runtime\n\n](/v0.2/docs/how_to/callbacks_runtime/)\n\n* [Next steps](#next-steps)"},"last_modified":{"kind":"null"}}},{"rowIdx":1348,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/callbacks_runtime/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to pass callbacks in at runtime\n\nOn this page\n\nHow to pass callbacks in at runtime\n===================================\n\nPrerequisites\n\nThis guide assumes familiarity with the following concepts:\n\n* [Callbacks](/v0.2/docs/concepts/#callbacks)\n* [Custom callback handlers](/v0.2/docs/how_to/custom_callbacks/)\n\nIn many cases, it is advantageous to pass in handlers instead when running the object. When we pass through [`CallbackHandlers`](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) using the `callbacks` keyword arg when executing an run, those callbacks will be issued by all nested objects involved in the execution. For example, when a handler is passed through to an Agent, it will be used for all callbacks related to the agent and all the objects involved in the agent's execution, in this case, the Tools and LLM.\n\nThis prevents us from having to manually attach the handlers to each individual nested object. Here's an example:\n\n from typing import Any, Dict, Listfrom langchain_anthropic import ChatAnthropicfrom langchain_core.callbacks import BaseCallbackHandlerfrom langchain_core.messages import BaseMessagefrom langchain_core.outputs import LLMResultfrom langchain_core.prompts import ChatPromptTemplateclass LoggingHandler(BaseCallbackHandler): def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs ) -> None: print(\"Chat model started\") def on_llm_end(self, response: LLMResult, **kwargs) -> None: print(f\"Chat model ended, response: {response}\") def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs ) -> None: print(f\"Chain {serialized.get('name')} started\") def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None: print(f\"Chain ended, outputs: {outputs}\")callbacks = [LoggingHandler()]llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")prompt = ChatPromptTemplate.from_template(\"What is 1 + {number}?\")chain = prompt | llmchain.invoke({\"number\": \"2\"}, config={\"callbacks\": callbacks})\n\n**API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) | [BaseCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html) | [BaseMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.base.BaseMessage.html) | [LLMResult](https://api.python.langchain.com/en/latest/outputs/langchain_core.outputs.llm_result.LLMResult.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html)\n\n Chain RunnableSequence startedChain ChatPromptTemplate startedChain 
ended, outputs: messages=[HumanMessage(content='What is 1 + 2?')]Chat model startedChat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-bb0dddd8-85f3-4e6b-8553-eaa79f859ef8-0'))]] llm_output={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=NoneChain ended, outputs: content='1 + 2 = 3' response_metadata={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} id='run-bb0dddd8-85f3-4e6b-8553-eaa79f859ef8-0'\n\n AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-bb0dddd8-85f3-4e6b-8553-eaa79f859ef8-0')\n\nIf there are already existing callbacks associated with a module, these will run in addition to any passed in at runtime.\n\nNext steps[​](#next-steps \"Direct link to Next steps\")\n------------------------------------------------------\n\nYou've now learned how to pass callbacks at runtime.\n\nNext, check out the other how-to guides in this section, such as how to [pass callbacks into a module constructor](/v0.2/docs/how_to/custom_callbacks/).\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/callbacks_runtime.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to propagate callbacks constructor\n\n](/v0.2/docs/how_to/callbacks_constructor/)[\n\nNext\n\nHow to split by character\n\n](/v0.2/docs/how_to/character_text_splitter/)\n\n* [Next steps](#next-steps)"},"last_modified":{"kind":"null"}}},{"rowIdx":1349,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/document_loader_markdown/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to load Markdown\n\nOn this page\n\nHow to load Markdown\n====================\n\n[Markdown](https://en.wikipedia.org/wiki/Markdown) is a lightweight markup language for creating formatted text using a plain-text editor.\n\nHere we cover how to load `Markdown` documents into LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects that we can use downstream.\n\nWe will cover:\n\n* Basic usage;\n* Parsing of Markdown into elements such as titles, list items, and text.\n\nLangChain implements an [UnstructuredMarkdownLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.markdown.UnstructuredMarkdownLoader.html) object which requires the [Unstructured](https://unstructured-io.github.io/unstructured/) package. 
First we install it:\n\n # !pip install \"unstructured[md]\"\n\nBasic usage will ingest a Markdown file to a single document. Here we demonstrate on LangChain's readme:\n\n from langchain_community.document_loaders import UnstructuredMarkdownLoaderfrom langchain_core.documents import Documentmarkdown_path = \"../../../../README.md\"loader = UnstructuredMarkdownLoader(markdown_path)data = loader.load()assert len(data) == 1assert isinstance(data[0], Document)readme_content = data[0].page_contentprint(readme_content[:250])\n\n**API Reference:**[UnstructuredMarkdownLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.markdown.UnstructuredMarkdownLoader.html) | [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html)\n\n 🦜️🔗 LangChain⚡ Build context-aware reasoning applications ⚡Looking for the JS/TS library? Check out LangChain.js.To help you ship LangChain apps to production faster, check out LangSmith. LangSmith is a unified developer platform for building,\n\nRetain Elements[​](#retain-elements \"Direct link to Retain Elements\")\n---------------------------------------------------------------------\n\nUnder the hood, Unstructured creates different \"elements\" for different chunks of text. By default we combine those together, but you can easily keep that separation by specifying `mode=\"elements\"`.\n\n loader = UnstructuredMarkdownLoader(markdown_path, mode=\"elements\")data = loader.load()print(f\"Number of documents: {len(data)}\\n\")for document in data[:2]: print(f\"{document}\\n\")\n\n Number of documents: 65page_content='🦜️🔗 LangChain' metadata={'source': '../../../../README.md', 'last_modified': '2024-04-29T13:40:19', 'page_number': 1, 'languages': ['eng'], 'filetype': 'text/markdown', 'file_directory': '../../../..', 'filename': 'README.md', 'category': 'Title'}page_content='⚡ Build context-aware reasoning applications ⚡' metadata={'source': '../../../../README.md', 'last_modified': '2024-04-29T13:40:19', 'page_number': 1, 'languages': ['eng'], 'parent_id': 'c3223b6f7100be08a78f1e8c0c28fde1', 'filetype': 'text/markdown', 'file_directory': '../../../..', 'filename': 'README.md', 'category': 'NarrativeText'}\n\nNote that in this case we recover three distinct element types:\n\n print(set(document.metadata[\"category\"] for document in data))\n\n {'Title', 'NarrativeText', 'ListItem'}\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/document_loader_markdown.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to load JSON\n\n](/v0.2/docs/how_to/document_loader_json/)[\n\nNext\n\nHow to load Microsoft Office files\n\n](/v0.2/docs/how_to/document_loader_office_file/)\n\n* [Retain Elements](#retain-elements)"},"last_modified":{"kind":"null"}}},{"rowIdx":1350,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/document_loader_json/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to load JSON\n\nOn this page\n\nHow to load JSON\n================\n\n[JSON (JavaScript Object Notation)](https://en.wikipedia.org/wiki/JSON) is an open standard file format and data interchange 
format that uses human-readable text to store and transmit data objects consisting of attribute–value pairs and arrays (or other serializable values).\n\n[JSON Lines](https://jsonlines.org/) is a file format where each line is a valid JSON value.\n\nLangChain implements a [JSONLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.json_loader.JSONLoader.html) to convert JSON and JSONL data into LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects. It uses a specified [jq schema](https://en.wikipedia.org/wiki/Jq_\\(programming_language\\)) to parse the JSON files, allowing for the extraction of specific fields into the content and metadata of the LangChain Document.\n\nIt uses the `jq` python package. Check out this [manual](https://stedolan.github.io/jq/manual/#Basicfilters) for a detailed documentation of the `jq` syntax.\n\nHere we will demonstrate:\n\n* How to load JSON and JSONL data into the content of a LangChain `Document`;\n* How to load JSON and JSONL data into metadata associated with a `Document`.\n\n #!pip install jq\n\n from langchain_community.document_loaders import JSONLoader\n\n**API Reference:**[JSONLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.json_loader.JSONLoader.html)\n\n import jsonfrom pathlib import Pathfrom pprint import pprintfile_path='./example_data/facebook_chat.json'data = json.loads(Path(file_path).read_text())\n\n pprint(data)\n\n {'image': {'creation_timestamp': 1675549016, 'uri': 'image_of_the_chat.jpg'}, 'is_still_participant': True, 'joinable_mode': {'link': '', 'mode': 1}, 'magic_words': [], 'messages': [{'content': 'Bye!', 'sender_name': 'User 2', 'timestamp_ms': 1675597571851}, {'content': 'Oh no worries! Bye', 'sender_name': 'User 1', 'timestamp_ms': 1675597435669}, {'content': 'No Im sorry it was my mistake, the blue one is not ' 'for sale', 'sender_name': 'User 2', 'timestamp_ms': 1675596277579}, {'content': 'I thought you were selling the blue one!', 'sender_name': 'User 1', 'timestamp_ms': 1675595140251}, {'content': 'Im not interested in this bag. Im interested in the ' 'blue one!', 'sender_name': 'User 1', 'timestamp_ms': 1675595109305}, {'content': 'Here is $129', 'sender_name': 'User 2', 'timestamp_ms': 1675595068468}, {'photos': [{'creation_timestamp': 1675595059, 'uri': 'url_of_some_picture.jpg'}], 'sender_name': 'User 2', 'timestamp_ms': 1675595060730}, {'content': 'Online is at least $100', 'sender_name': 'User 2', 'timestamp_ms': 1675595045152}, {'content': 'How much do you want?', 'sender_name': 'User 1', 'timestamp_ms': 1675594799696}, {'content': 'Goodmorning! $50 is too low.', 'sender_name': 'User 2', 'timestamp_ms': 1675577876645}, {'content': 'Hi! Im interested in your bag. Im offering $50. Let ' 'me know if you are interested. Thanks!', 'sender_name': 'User 1', 'timestamp_ms': 1675549022673}], 'participants': [{'name': 'User 1'}, {'name': 'User 2'}], 'thread_path': 'inbox/User 1 and User 2 chat', 'title': 'User 1 and User 2 chat'}\n\nUsing `JSONLoader`[​](#using-jsonloader \"Direct link to using-jsonloader\")\n--------------------------------------------------------------------------\n\nSuppose we are interested in extracting the values under the `content` field within the `messages` key of the JSON data. 
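Before wiring this into the loader, it helps to be explicit about what that jq expression selects. Over the `data` dict parsed above, `.messages[].content` corresponds roughly to the plain-Python expression below (the photo-only message has no `content` key, which is why one of the resulting documents further down ends up with an empty `page_content`):

    # Rough plain-Python equivalent of the jq schema ".messages[].content":
    # iterate over the messages and pick out each "content" value.
    [message.get("content") for message in data["messages"]]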
This can easily be done through the `JSONLoader` as shown below.\n\n### JSON file[​](#json-file \"Direct link to JSON file\")\n\n loader = JSONLoader( file_path='./example_data/facebook_chat.json', jq_schema='.messages[].content', text_content=False)data = loader.load()\n\n pprint(data)\n\n [Document(page_content='Bye!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 1}), Document(page_content='Oh no worries! Bye', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 2}), Document(page_content='No Im sorry it was my mistake, the blue one is not for sale', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 3}), Document(page_content='I thought you were selling the blue one!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 4}), Document(page_content='Im not interested in this bag. Im interested in the blue one!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 5}), Document(page_content='Here is $129', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 6}), Document(page_content='', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 7}), Document(page_content='Online is at least $100', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 8}), Document(page_content='How much do you want?', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 9}), Document(page_content='Goodmorning! $50 is too low.', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 10}), Document(page_content='Hi! Im interested in your bag. Im offering $50. Let me know if you are interested. Thanks!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 11})]\n\n### JSON Lines file[​](#json-lines-file \"Direct link to JSON Lines file\")\n\nIf you want to load documents from a JSON Lines file, you pass `json_lines=True` and specify `jq_schema` to extract `page_content` from a single JSON object.\n\n file_path = './example_data/facebook_chat_messages.jsonl'pprint(Path(file_path).read_text())\n\n ('{\"sender_name\": \"User 2\", \"timestamp_ms\": 1675597571851, \"content\": \"Bye!\"}\\n' '{\"sender_name\": \"User 1\", \"timestamp_ms\": 1675597435669, \"content\": \"Oh no ' 'worries! 
Bye\"}\\n' '{\"sender_name\": \"User 2\", \"timestamp_ms\": 1675596277579, \"content\": \"No Im ' 'sorry it was my mistake, the blue one is not for sale\"}\\n')\n\n loader = JSONLoader( file_path='./example_data/facebook_chat_messages.jsonl', jq_schema='.content', text_content=False, json_lines=True)data = loader.load()\n\n pprint(data)\n\n [Document(page_content='Bye!', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat_messages.jsonl', 'seq_num': 1}), Document(page_content='Oh no worries! Bye', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat_messages.jsonl', 'seq_num': 2}), Document(page_content='No Im sorry it was my mistake, the blue one is not for sale', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat_messages.jsonl', 'seq_num': 3})]\n\nAnother option is set `jq_schema='.'` and provide `content_key`:\n\n loader = JSONLoader( file_path='./example_data/facebook_chat_messages.jsonl', jq_schema='.', content_key='sender_name', json_lines=True)data = loader.load()\n\n pprint(data)\n\n [Document(page_content='User 2', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat_messages.jsonl', 'seq_num': 1}), Document(page_content='User 1', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat_messages.jsonl', 'seq_num': 2}), Document(page_content='User 2', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat_messages.jsonl', 'seq_num': 3})]\n\n### JSON file with jq schema `content_key`[​](#json-file-with-jq-schema-content_key \"Direct link to json-file-with-jq-schema-content_key\")\n\nTo load documents from a JSON file using the content\\_key within the jq schema, set is\\_content\\_key\\_jq\\_parsable=True. Ensure that content\\_key is compatible and can be parsed using the jq schema.\n\n file_path = './sample.json'pprint(Path(file_path).read_text())\n\n {\"data\": [ {\"attributes\": { \"message\": \"message1\", \"tags\": [ \"tag1\"]}, \"id\": \"1\"}, {\"attributes\": { \"message\": \"message2\", \"tags\": [ \"tag2\"]}, \"id\": \"2\"}]}\n\n loader = JSONLoader( file_path=file_path, jq_schema=\".data[]\", content_key=\".attributes.message\", is_content_key_jq_parsable=True,)data = loader.load()\n\n pprint(data)\n\n [Document(page_content='message1', metadata={'source': '/path/to/sample.json', 'seq_num': 1}), Document(page_content='message2', metadata={'source': '/path/to/sample.json', 'seq_num': 2})]\n\nExtracting metadata[​](#extracting-metadata \"Direct link to Extracting metadata\")\n---------------------------------------------------------------------------------\n\nGenerally, we want to include metadata available in the JSON file into the documents that we create from the content.\n\nThe following demonstrates how metadata can be extracted using the `JSONLoader`.\n\nThere are some key changes to be noted. In the previous example where we didn't collect the metadata, we managed to directly specify in the schema where the value for the `page_content` can be extracted from.\n\n .messages[].content\n\nIn the current example, we have to tell the loader to iterate over the records in the `messages` field. The jq\\_schema then has to be:\n\n .messages[]\n\nThis allows us to pass the records (dict) into the `metadata_func` that has to be implemented. 
The `metadata_func` is responsible for identifying which pieces of information in the record should be included in the metadata stored in the final `Document` object.\n\nAdditionally, we now have to explicitly specify in the loader, via the `content_key` argument, the key from the record where the value for the `page_content` needs to be extracted from.\n\n # Define the metadata extraction function.def metadata_func(record: dict, metadata: dict) -> dict: metadata[\"sender_name\"] = record.get(\"sender_name\") metadata[\"timestamp_ms\"] = record.get(\"timestamp_ms\") return metadataloader = JSONLoader( file_path='./example_data/facebook_chat.json', jq_schema='.messages[]', content_key=\"content\", metadata_func=metadata_func)data = loader.load()\n\n pprint(data)\n\n [Document(page_content='Bye!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 1, 'sender_name': 'User 2', 'timestamp_ms': 1675597571851}), Document(page_content='Oh no worries! Bye', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 2, 'sender_name': 'User 1', 'timestamp_ms': 1675597435669}), Document(page_content='No Im sorry it was my mistake, the blue one is not for sale', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 3, 'sender_name': 'User 2', 'timestamp_ms': 1675596277579}), Document(page_content='I thought you were selling the blue one!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 4, 'sender_name': 'User 1', 'timestamp_ms': 1675595140251}), Document(page_content='Im not interested in this bag. Im interested in the blue one!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 5, 'sender_name': 'User 1', 'timestamp_ms': 1675595109305}), Document(page_content='Here is $129', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 6, 'sender_name': 'User 2', 'timestamp_ms': 1675595068468}), Document(page_content='', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 7, 'sender_name': 'User 2', 'timestamp_ms': 1675595060730}), Document(page_content='Online is at least $100', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 8, 'sender_name': 'User 2', 'timestamp_ms': 1675595045152}), Document(page_content='How much do you want?', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 9, 'sender_name': 'User 1', 'timestamp_ms': 1675594799696}), Document(page_content='Goodmorning! $50 is too low.', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 10, 'sender_name': 'User 2', 'timestamp_ms': 1675577876645}), Document(page_content='Hi! Im interested in your bag. Im offering $50. Let me know if you are interested. 
Thanks!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 11, 'sender_name': 'User 1', 'timestamp_ms': 1675549022673})]\n\nNow, you will see that the documents contain the metadata associated with the content we extracted.\n\nThe `metadata_func`[​](#the-metadata_func \"Direct link to the-metadata_func\")\n-----------------------------------------------------------------------------\n\nAs shown above, the `metadata_func` accepts the default metadata generated by the `JSONLoader`. This allows full control to the user with respect to how the metadata is formatted.\n\nFor example, the default metadata contains the `source` and the `seq_num` keys. However, it is possible that the JSON data contain these keys as well. The user can then exploit the `metadata_func` to rename the default keys and use the ones from the JSON data.\n\nThe example below shows how we can modify the `source` to only contain information of the file source relative to the `langchain` directory.\n\n # Define the metadata extraction function.def metadata_func(record: dict, metadata: dict) -> dict: metadata[\"sender_name\"] = record.get(\"sender_name\") metadata[\"timestamp_ms\"] = record.get(\"timestamp_ms\") if \"source\" in metadata: source = metadata[\"source\"].split(\"/\") source = source[source.index(\"langchain\"):] metadata[\"source\"] = \"/\".join(source) return metadataloader = JSONLoader( file_path='./example_data/facebook_chat.json', jq_schema='.messages[]', content_key=\"content\", metadata_func=metadata_func)data = loader.load()\n\n pprint(data)\n\n [Document(page_content='Bye!', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 1, 'sender_name': 'User 2', 'timestamp_ms': 1675597571851}), Document(page_content='Oh no worries! Bye', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 2, 'sender_name': 'User 1', 'timestamp_ms': 1675597435669}), Document(page_content='No Im sorry it was my mistake, the blue one is not for sale', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 3, 'sender_name': 'User 2', 'timestamp_ms': 1675596277579}), Document(page_content='I thought you were selling the blue one!', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 4, 'sender_name': 'User 1', 'timestamp_ms': 1675595140251}), Document(page_content='Im not interested in this bag. 
Im interested in the blue one!', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 5, 'sender_name': 'User 1', 'timestamp_ms': 1675595109305}), Document(page_content='Here is $129', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 6, 'sender_name': 'User 2', 'timestamp_ms': 1675595068468}), Document(page_content='', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 7, 'sender_name': 'User 2', 'timestamp_ms': 1675595060730}), Document(page_content='Online is at least $100', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 8, 'sender_name': 'User 2', 'timestamp_ms': 1675595045152}), Document(page_content='How much do you want?', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 9, 'sender_name': 'User 1', 'timestamp_ms': 1675594799696}), Document(page_content='Goodmorning! $50 is too low.', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 10, 'sender_name': 'User 2', 'timestamp_ms': 1675577876645}), Document(page_content='Hi! Im interested in your bag. Im offering $50. Let me know if you are interested. Thanks!', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 11, 'sender_name': 'User 1', 'timestamp_ms': 1675549022673})]\n\nCommon JSON structures with jq schema[​](#common-json-structures-with-jq-schema \"Direct link to Common JSON structures with jq schema\")\n---------------------------------------------------------------------------------------------------------------------------------------\n\nThe list below provides a reference to the possible `jq_schema` the user can use to extract content from the JSON data depending on the structure.\n\n JSON -> [{\"text\": ...}, {\"text\": ...}, {\"text\": ...}]jq_schema -> \".[].text\"JSON -> {\"key\": [{\"text\": ...}, {\"text\": ...}, {\"text\": ...}]}jq_schema -> \".key[].text\"JSON -> [\"...\", \"...\", \"...\"]jq_schema -> \".[]\"\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/document_loader_json.mdx)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to load HTML\n\n](/v0.2/docs/how_to/document_loader_html/)[\n\nNext\n\nHow to load Markdown\n\n](/v0.2/docs/how_to/document_loader_markdown/)\n\n* [Using `JSONLoader`](#using-jsonloader)\n * [JSON file](#json-file)\n * [JSON Lines file](#json-lines-file)\n * [JSON file with jq schema `content_key`](#json-file-with-jq-schema-content_key)\n* [Extracting metadata](#extracting-metadata)\n* [The `metadata_func`](#the-metadata_func)\n* [Common JSON structures with jq schema](#common-json-structures-with-jq-schema)"},"last_modified":{"kind":"null"}}},{"rowIdx":1351,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/character_text_splitter/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How 
to split by character\n\nHow to split by character\n=========================\n\nThis is the simplest method. This splits based on a given character sequence, which defaults to `\"\\n\\n\"`. Chunk length is measured by number of characters.\n\n1. How the text is split: by single character separator.\n2. How the chunk size is measured: by number of characters.\n\nTo obtain the string content directly, use `.split_text`.\n\nTo create LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) objects (e.g., for use in downstream tasks), use `.create_documents`.\n\n %pip install -qU langchain-text-splitters\n\n from langchain_text_splitters import CharacterTextSplitter# Load an example documentwith open(\"state_of_the_union.txt\") as f: state_of_the_union = f.read()text_splitter = CharacterTextSplitter( separator=\"\\n\\n\", chunk_size=1000, chunk_overlap=200, length_function=len, is_separator_regex=False,)texts = text_splitter.create_documents([state_of_the_union])print(texts[0])\n\n**API Reference:**[CharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.CharacterTextSplitter.html)\n\n page_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \\n\\nLast year COVID-19 kept us apart. This year we are finally together again. \\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \\n\\nWith a duty to one another to the American people to the Constitution. \\n\\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \\n\\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \\n\\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \\n\\nHe met the Ukrainian people. \\n\\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world.'\n\nUse `.create_documents` to propagate metadata associated with each document to the output chunks:\n\n metadatas = [{\"document\": 1}, {\"document\": 2}]documents = text_splitter.create_documents( [state_of_the_union, state_of_the_union], metadatas=metadatas)print(documents[0])\n\n page_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \\n\\nLast year COVID-19 kept us apart. This year we are finally together again. \\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \\n\\nWith a duty to one another to the American people to the Constitution. \\n\\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \\n\\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \\n\\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \\n\\nHe met the Ukrainian people. \\n\\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world.' 
metadata={'document': 1}\n\nUse `.split_text` to obtain the string content directly:\n\n text_splitter.split_text(state_of_the_union)[0]\n\n 'Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \\n\\nLast year COVID-19 kept us apart. This year we are finally together again. \\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \\n\\nWith a duty to one another to the American people to the Constitution. \\n\\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \\n\\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \\n\\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \\n\\nHe met the Ukrainian people. \\n\\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world.'\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/character_text_splitter.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to pass callbacks in at runtime\n\n](/v0.2/docs/how_to/callbacks_runtime/)[\n\nNext\n\nHow to cache chat model responses\n\n](/v0.2/docs/how_to/chat_model_caching/)"},"last_modified":{"kind":"null"}}},{"rowIdx":1352,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/chat_model_caching/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to cache chat model responses\n\nOn this page\n\nHow to cache chat model responses\n=================================\n\nPrerequisites\n\nThis guide assumes familiarity with the following concepts:\n\n* [Chat models](/v0.2/docs/concepts/#chat-models)\n* [LLMs](/v0.2/docs/concepts/#llms)\n\nLangChain provides an optional caching layer for chat models. This is useful for two main reasons:\n\n* It can save you money by reducing the number of API calls you make to the LLM provider, if you're often requesting the same completion multiple times. 
This is especially useful during app development.\n* It can speed up your application by reducing the number of API calls you make to the LLM provider.\n\nThis guide will walk you through how to enable this in your apps.\n\n* OpenAI\n* Anthropic\n* Azure\n* Google\n* Cohere\n* FireworksAI\n* Groq\n* MistralAI\n* TogetherAI\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"OPENAI_API_KEY\"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n\n pip install -qU langchain-anthropic\n\n import getpassimport osos.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()from langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass()from langchain_openai import AzureChatOpenAIllm = AzureChatOpenAI( azure_endpoint=os.environ[\"AZURE_OPENAI_ENDPOINT\"], azure_deployment=os.environ[\"AZURE_OPENAI_DEPLOYMENT_NAME\"], openai_api_version=os.environ[\"AZURE_OPENAI_API_VERSION\"],)\n\n pip install -qU langchain-google-vertexai\n\n import getpassimport osos.environ[\"GOOGLE_API_KEY\"] = getpass.getpass()from langchain_google_vertexai import ChatVertexAIllm = ChatVertexAI(model=\"gemini-pro\")\n\n pip install -qU langchain-cohere\n\n import getpassimport osos.environ[\"COHERE_API_KEY\"] = getpass.getpass()from langchain_cohere import ChatCoherellm = ChatCohere(model=\"command-r\")\n\n pip install -qU langchain-fireworks\n\n import getpassimport osos.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass()from langchain_fireworks import ChatFireworksllm = ChatFireworks(model=\"accounts/fireworks/models/mixtral-8x7b-instruct\")\n\n pip install -qU langchain-groq\n\n import getpassimport osos.environ[\"GROQ_API_KEY\"] = getpass.getpass()from langchain_groq import ChatGroqllm = ChatGroq(model=\"llama3-8b-8192\")\n\n pip install -qU langchain-mistralai\n\n import getpassimport osos.environ[\"MISTRAL_API_KEY\"] = getpass.getpass()from langchain_mistralai import ChatMistralAIllm = ChatMistralAI(model=\"mistral-large-latest\")\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"TOGETHER_API_KEY\"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI( base_url=\"https://api.together.xyz/v1\", api_key=os.environ[\"TOGETHER_API_KEY\"], model=\"mistralai/Mixtral-8x7B-Instruct-v0.1\",)\n\n # from langchain.globals import set_llm_cache\n\n**API Reference:**[set\\_llm\\_cache](https://api.python.langchain.com/en/latest/globals/langchain.globals.set_llm_cache.html)\n\nIn Memory Cache[​](#in-memory-cache \"Direct link to In Memory Cache\")\n---------------------------------------------------------------------\n\nThis is an ephemeral cache that stores model calls in memory. 
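Note that cache lookups are exact-match: a response is reused only when the prompt and the model parameters are identical, so changing either results in a fresh API call. A minimal sketch, assuming `llm` has been initialized as in one of the tabs above:

    from langchain.cache import InMemoryCache
    from langchain.globals import set_llm_cache

    set_llm_cache(InMemoryCache())

    llm.invoke("Tell me a joke")  # first call: not cached yet, hits the API
    llm.invoke("Tell me a joke")  # identical call: served from the cache
    llm.invoke("Tell me a pun")   # different prompt, so it misses the cache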
It will be wiped when your environment restarts, and is not shared across processes.\n\n %%timefrom langchain.cache import InMemoryCacheset_llm_cache(InMemoryCache())# The first time, it is not yet in cache, so it should take longerllm.invoke(\"Tell me a joke\")\n\n**API Reference:**[InMemoryCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.InMemoryCache.html)\n\n CPU times: user 645 ms, sys: 214 ms, total: 859 msWall time: 829 ms\n\n AIMessage(content=\"Why don't scientists trust atoms?\\n\\nBecause they make up everything!\", response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 11, 'total_tokens': 24}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-b6836bdd-8c30-436b-828f-0ac5fc9ab50e-0')\n\n %%time# The second time it is, so it goes fasterllm.invoke(\"Tell me a joke\")\n\n CPU times: user 822 µs, sys: 288 µs, total: 1.11 msWall time: 1.06 ms\n\n AIMessage(content=\"Why don't scientists trust atoms?\\n\\nBecause they make up everything!\", response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 11, 'total_tokens': 24}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-b6836bdd-8c30-436b-828f-0ac5fc9ab50e-0')\n\nSQLite Cache[​](#sqlite-cache \"Direct link to SQLite Cache\")\n------------------------------------------------------------\n\nThis cache implementation uses a `SQLite` database to store responses, and will last across process restarts.\n\n !rm .langchain.db\n\n # We can do the same thing with a SQLite cachefrom langchain_community.cache import SQLiteCacheset_llm_cache(SQLiteCache(database_path=\".langchain.db\"))\n\n**API Reference:**[SQLiteCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.SQLiteCache.html)\n\n %%time# The first time, it is not yet in cache, so it should take longerllm.invoke(\"Tell me a joke\")\n\n CPU times: user 9.91 ms, sys: 7.68 ms, total: 17.6 msWall time: 657 ms\n\n AIMessage(content='Why did the scarecrow win an award? Because he was outstanding in his field!', response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 11, 'total_tokens': 28}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-39d9e1e8-7766-4970-b1d8-f50213fd94c5-0')\n\n %%time# The second time it is, so it goes fasterllm.invoke(\"Tell me a joke\")\n\n CPU times: user 52.2 ms, sys: 60.5 ms, total: 113 msWall time: 127 ms\n\n AIMessage(content='Why did the scarecrow win an award? 
Because he was outstanding in his field!', id='run-39d9e1e8-7766-4970-b1d8-f50213fd94c5-0')\n\nNext steps[​](#next-steps \"Direct link to Next steps\")\n------------------------------------------------------\n\nYou've now learned how to cache model responses to save time and money.\n\nNext, check out the other how-to guides chat models in this section, like [how to get a model to return structured output](/v0.2/docs/how_to/structured_output/) or [how to create your own custom chat model](/v0.2/docs/how_to/custom_chat_model/).\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/chat_model_caching.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to split by character\n\n](/v0.2/docs/how_to/character_text_splitter/)[\n\nNext\n\nHow to init any model in one line\n\n](/v0.2/docs/how_to/chat_models_universal_init/)\n\n* [In Memory Cache](#in-memory-cache)\n* [SQLite Cache](#sqlite-cache)\n* [Next steps](#next-steps)"},"last_modified":{"kind":"null"}}},{"rowIdx":1353,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/chat_models_universal_init/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to init any model in one line\n\nOn this page\n\nHow to init any model in one line\n=================================\n\nMany LLM applications let end users specify what model provider and model they want the application to be powered by. This requires writing some logic to initialize different ChatModels based on some user configuration. The `init_chat_model()` helper method makes it easy to initialize a number of different model integrations without having to worry about import paths and class names.\n\nSupported models\n\nSee the [init\\_chat\\_model()](https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.init_chat_model.html) API reference for a full list of supported integrations.\n\nMake sure you have the integration packages installed for any model providers you want to support. E.g. 
you should have `langchain-openai` installed to init an OpenAI model.\n\n %pip install -qU langchain langchain-openai langchain-anthropic langchain-google-vertexai\n\nBasic usage[​](#basic-usage \"Direct link to Basic usage\")\n---------------------------------------------------------\n\n from langchain.chat_models import init_chat_model# Returns a langchain_openai.ChatOpenAI instance.gpt_4o = init_chat_model(\"gpt-4o\", model_provider=\"openai\", temperature=0)# Returns a langchain_anthropic.ChatAnthropic instance.claude_opus = init_chat_model( \"claude-3-opus-20240229\", model_provider=\"anthropic\", temperature=0)# Returns a langchain_google_vertexai.ChatVertexAI instance.gemini_15 = init_chat_model( \"gemini-1.5-pro\", model_provider=\"google_vertexai\", temperature=0)# Since all model integrations implement the ChatModel interface, you can use them in the same way.print(\"GPT-4o: \" + gpt_4o.invoke(\"what's your name\").content + \"\\n\")print(\"Claude Opus: \" + claude_opus.invoke(\"what's your name\").content + \"\\n\")print(\"Gemini 1.5: \" + gemini_15.invoke(\"what's your name\").content + \"\\n\")\n\n**API Reference:**[init\\_chat\\_model](https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.init_chat_model.html)\n\n GPT-4o: I'm an AI created by OpenAI, and I don't have a personal name. You can call me Assistant! How can I help you today?Claude Opus: My name is Claude. It's nice to meet you!Gemini 1.5: I am a large language model, trained by Google. I do not have a name.\n\nSimple config example[​](#simple-config-example \"Direct link to Simple config example\")\n---------------------------------------------------------------------------------------\n\n user_config = { \"model\": \"...user-specified...\", \"model_provider\": \"...user-specified...\", \"temperature\": 0, \"max_tokens\": 1000,}llm = init_chat_model(**user_config)llm.invoke(\"what's your name\")\n\nInferring model provider[​](#inferring-model-provider \"Direct link to Inferring model provider\")\n------------------------------------------------------------------------------------------------\n\nFor common and distinct model names `init_chat_model()` will attempt to infer the model provider. See the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.init_chat_model.html) for a full list of inference behavior. E.g. 
any model that starts with `gpt-3...` or `gpt-4...` will be inferred as using model provider `openai`.\n\n gpt_4o = init_chat_model(\"gpt-4o\", temperature=0)claude_opus = init_chat_model(\"claude-3-opus-20240229\", temperature=0)gemini_15 = init_chat_model(\"gemini-1.5-pro\", temperature=0)\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/chat_models_universal_init.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to cache chat model responses\n\n](/v0.2/docs/how_to/chat_model_caching/)[\n\nNext\n\nHow to track token usage in ChatModels\n\n](/v0.2/docs/how_to/chat_token_usage_tracking/)\n\n* [Basic usage](#basic-usage)\n* [Simple config example](#simple-config-example)\n* [Inferring model provider](#inferring-model-provider)"},"last_modified":{"kind":"null"}}},{"rowIdx":1354,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/document_loader_pdf/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to load PDFs\n\nOn this page\n\nHow to load PDFs\n================\n\n[Portable Document Format (PDF)](https://en.wikipedia.org/wiki/PDF), standardized as ISO 32000, is a file format developed by Adobe in 1992 to present documents, including text formatting and images, in a manner independent of application software, hardware, and operating systems.\n\nThis guide covers how to load `PDF` documents into the LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) format that we use downstream.\n\nLangChain integrates with a host of PDF parsers. Some are simple and relatively low-level; others will support OCR and image-processing, or perform advanced document layout analysis. The right choice will depend on your application. Below we enumerate the possibilities.\n\nUsing PyPDF[​](#using-pypdf \"Direct link to Using PyPDF\")\n---------------------------------------------------------\n\nHere we load a PDF using `pypdf` into array of documents, where each document contains the page content and metadata with `page` number.\n\n %pip install pypdf\n\n from langchain_community.document_loaders import PyPDFLoaderfile_path = ( \"../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf\")loader = PyPDFLoader(file_path)pages = loader.load_and_split()pages[0]\n\n**API Reference:**[PyPDFLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.PyPDFLoader.html)\n\n Document(page_content='LayoutParser : A Unified Toolkit for Deep\\nLearning Based Document Image Analysis\\nZejiang Shen1( \\x00), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain\\nLee4, Jacob Carlson3, and Weining Li5\\n1Allen Institute for AI\\nshannons@allenai.org\\n2Brown University\\nruochen zhang@brown.edu\\n3Harvard University\\n{melissadell,jacob carlson }@fas.harvard.edu\\n4University of Washington\\nbcgl@cs.washington.edu\\n5University of Waterloo\\nw422li@uwaterloo.ca\\nAbstract. Recent advances in document image analysis (DIA) have been\\nprimarily driven by the application of neural networks. 
Ideally, research\\noutcomes could be easily deployed in production and extended for further\\ninvestigation. However, various factors like loosely organized codebases\\nand sophisticated model configurations complicate the easy reuse of im-\\nportant innovations by a wide audience. Though there have been on-going\\nefforts to improve reusability and simplify deep learning (DL) model\\ndevelopment in disciplines like natural language processing and computer\\nvision, none of them are optimized for challenges in the domain of DIA.\\nThis represents a major gap in the existing toolkit, as DIA is central to\\nacademic research across a wide range of disciplines in the social sciences\\nand humanities. This paper introduces LayoutParser , an open-source\\nlibrary for streamlining the usage of DL in DIA research and applica-\\ntions. The core LayoutParser library comes with a set of simple and\\nintuitive interfaces for applying and customizing DL models for layout de-\\ntection, character recognition, and many other document processing tasks.\\nTo promote extensibility, LayoutParser also incorporates a community\\nplatform for sharing both pre-trained models and full document digiti-\\nzation pipelines. We demonstrate that LayoutParser is helpful for both\\nlightweight and large-scale digitization pipelines in real-word use cases.\\nThe library is publicly available at https://layout-parser.github.io .\\nKeywords: Document Image Analysis ·Deep Learning ·Layout Analysis\\n·Character Recognition ·Open Source library ·Toolkit.\\n1 Introduction\\nDeep Learning(DL)-based approaches are the state-of-the-art for a wide range of\\ndocument image analysis (DIA) tasks including document image classification [ 11,arXiv:2103.15348v2 [cs.CV] 21 Jun 2021', metadata={'source': '../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf', 'page': 0})\n\nAn advantage of this approach is that documents can be retrieved with page numbers.\n\n### Vector search over PDFs[​](#vector-search-over-pdfs \"Direct link to Vector search over PDFs\")\n\nOnce we have loaded PDFs into LangChain `Document` objects, we can index them (e.g., a RAG application) in the usual way:\n\n %pip install faiss-cpu # use `pip install faiss-gpu` for CUDA GPU support\n\n import getpassimport osos.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n\n from langchain_community.vectorstores import FAISSfrom langchain_openai import OpenAIEmbeddingsfaiss_index = FAISS.from_documents(pages, OpenAIEmbeddings())docs = faiss_index.similarity_search(\"What is LayoutParser?\", k=2)for doc in docs: print(str(doc.metadata[\"page\"]) + \":\", doc.page_content[:300])\n\n**API Reference:**[FAISS](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html)\n\n 13: 14 Z. Shen et al.6 ConclusionLayoutParser provides a comprehensive toolkit for deep learning-based documentimage analysis. The off-the-shelf library is easy to install, and can be used tobuild flexible and accurate pipelines for processing documents with complicatedstructures. 
It also supports hi0: LayoutParser : A Unified Toolkit for DeepLearning Based Document Image AnalysisZejiang Shen1( ), Ruochen Zhang2, Melissa Dell3, Benjamin Charles GermainLee4, Jacob Carlson3, and Weining Li51Allen Institute for AIshannons@allenai.org2Brown Universityruochen zhang@brown.edu3Harvard University\n\n### Extract text from images[​](#extract-text-from-images \"Direct link to Extract text from images\")\n\nSome PDFs contain images of text-- e.g., within scanned documents, or figures. Using the `rapidocr-onnxruntime` package we can extract images as text as well:\n\n %pip install rapidocr-onnxruntime\n\n loader = PyPDFLoader(\"https://arxiv.org/pdf/2103.15348.pdf\", extract_images=True)pages = loader.load()pages[4].page_content\n\n 'LayoutParser : A Unified Toolkit for DL-Based DIA 5\\nTable 1: Current layout detection models in the LayoutParser model zoo\\nDataset Base Model1Large Model Notes\\nPubLayNet [38] F / M M Layouts of modern scientific documents\\nPRImA [3] M - Layouts of scanned modern magazines and scientific reports\\nNewspaper [17] F - Layouts of scanned US newspapers from the 20th century\\nTableBank [18] F F Table region on modern scientific and business document\\nHJDataset [31] F / M - Layouts of history Japanese documents\\n1For each dataset, we train several models of different sizes for different needs (the trade-off between accuracy\\nvs. computational cost). For “base model” and “large model”, we refer to using the ResNet 50 or ResNet 101\\nbackbones [ 13], respectively. One can train models of different architectures, like Faster R-CNN [ 28] (F) and Mask\\nR-CNN [ 12] (M). For example, an F in the Large Model column indicates it has a Faster R-CNN model trained\\nusing the ResNet 101 backbone. The platform is maintained and a number of additions will be made to the model\\nzoo in coming months.\\nlayout data structures , which are optimized for efficiency and versatility. 3) When\\nnecessary, users can employ existing or customized OCR models via the unified\\nAPI provided in the OCR module . 4)LayoutParser comes with a set of utility\\nfunctions for the visualization and storage of the layout data. 5) LayoutParser\\nis also highly customizable, via its integration with functions for layout data\\nannotation and model training . We now provide detailed descriptions for each\\ncomponent.\\n3.1 Layout Detection Models\\nInLayoutParser , a layout model takes a document image as an input and\\ngenerates a list of rectangular boxes for the target content regions. Different\\nfrom traditional methods, it relies on deep convolutional neural networks rather\\nthan manually curated rules to identify content regions. It is formulated as an\\nobject detection problem and state-of-the-art models like Faster R-CNN [ 28] and\\nMask R-CNN [ 12] are used. This yields prediction results of high accuracy and\\nmakes it possible to build a concise, generalized interface for layout detection.\\nLayoutParser , built upon Detectron2 [ 35], provides a minimal API that can\\nperform layout detection with only four lines of code in Python:\\n1import layoutparser as lp\\n2image = cv2. imread (\" image_file \") # load images\\n3model = lp. Detectron2LayoutModel (\\n4 \"lp :// PubLayNet / faster_rcnn_R_50_FPN_3x / config \")\\n5layout = model . detect ( image )\\nLayoutParser provides a wealth of pre-trained model weights using various\\ndatasets covering different languages, time periods, and document types. 
Due to\\ndomain shift [ 7], the prediction performance can notably drop when models are ap-\\nplied to target samples that are significantly different from the training dataset. As\\ndocument structures and layouts vary greatly in different domains, it is important\\nto select models trained on a dataset similar to the test samples. A semantic syntax\\nis used for initializing the model weights in LayoutParser , using both the dataset\\nname and model name lp:///.'\n\nUsing PyMuPDF[​](#using-pymupdf \"Direct link to Using PyMuPDF\")\n---------------------------------------------------------------\n\nThis is the fastest of the PDF parsing options; it contains detailed metadata about the PDF and its pages, and returns one document per page.\n\n from langchain_community.document_loaders import PyMuPDFLoaderloader = PyMuPDFLoader(\"example_data/layout-parser-paper.pdf\")data = loader.load()data[0]\n\n**API Reference:**[PyMuPDFLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.PyMuPDFLoader.html)\n\nAdditionally, you can pass along any of the options from the [PyMuPDF documentation](https://pymupdf.readthedocs.io/en/latest/app1.html#plain-text/) as keyword arguments in the `load` call, and it will be passed along to the `get_text()` call.\n\nUsing MathPix[​](#using-mathpix \"Direct link to Using MathPix\")\n---------------------------------------------------------------\n\nInspired by Daniel Gross's [https://gist.github.com/danielgross/3ab4104e14faccc12b49200843adab21](https://gist.github.com/danielgross/3ab4104e14faccc12b49200843adab21)\n\n from langchain_community.document_loaders import MathpixPDFLoaderfile_path = ( \"../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf\")loader = MathpixPDFLoader(file_path)data = loader.load()\n\n**API Reference:**[MathpixPDFLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.MathpixPDFLoader.html)\n\nUsing Unstructured[​](#using-unstructured \"Direct link to Using Unstructured\")\n------------------------------------------------------------------------------\n\n[Unstructured](https://unstructured-io.github.io/unstructured/) supports a common interface for working with unstructured or semi-structured file formats, such as Markdown or PDF. LangChain's [UnstructuredPDFLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.UnstructuredPDFLoader.html) integrates with Unstructured to parse PDF documents into LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) objects.\n\n from langchain_community.document_loaders import UnstructuredPDFLoaderfile_path = ( \"../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf\")loader = UnstructuredPDFLoader(file_path)data = loader.load()\n\n**API Reference:**[UnstructuredPDFLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.UnstructuredPDFLoader.html)\n\n### Retain Elements[​](#retain-elements \"Direct link to Retain Elements\")\n\nUnder the hood, Unstructured creates different \"elements\" for different chunks of text. 
By default we combine those together, but you can easily keep that separation by specifying `mode=\"elements\"`.\n\n file_path = ( \"../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf\")loader = UnstructuredPDFLoader(file_path, mode=\"elements\")data = loader.load()data[0]\n\n Document(page_content='1 2 0 2', metadata={'source': '../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 213.36), (16.34, 253.36), (36.34, 253.36), (36.34, 213.36)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': '../../../docs/integrations/document_loaders/example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-03-18T13:22:22', 'page_number': 1, 'filetype': 'application/pdf', 'category': 'UncategorizedText'})\n\nSee the full set of element types for this particular document:\n\n set(doc.metadata[\"category\"] for doc in data)\n\n {'ListItem', 'NarrativeText', 'Title', 'UncategorizedText'}\n\n### Fetching remote PDFs using Unstructured[​](#fetching-remote-pdfs-using-unstructured \"Direct link to Fetching remote PDFs using Unstructured\")\n\nThis covers how to load online PDFs into a document format that we can use downstream. This can be used for various online PDF sites such as [https://open.umn.edu/opentextbooks/textbooks/](https://open.umn.edu/opentextbooks/textbooks/) and [https://arxiv.org/archive/](https://arxiv.org/archive/)\n\nNote: all other PDF loaders can also be used to fetch remote PDFs, but `OnlinePDFLoader` is a legacy function, and works specifically with `UnstructuredPDFLoader`.\n\n from langchain_community.document_loaders import OnlinePDFLoaderloader = OnlinePDFLoader(\"https://arxiv.org/pdf/2302.03803.pdf\")data = loader.load()\n\n**API Reference:**[OnlinePDFLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.OnlinePDFLoader.html)\n\nUsing PyPDFium2[​](#using-pypdfium2 \"Direct link to Using PyPDFium2\")\n---------------------------------------------------------------------\n\n from langchain_community.document_loaders import PyPDFium2Loaderfile_path = ( \"../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf\")loader = PyPDFium2Loader(file_path)data = loader.load()\n\n**API Reference:**[PyPDFium2Loader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.PyPDFium2Loader.html)\n\nUsing PDFMiner[​](#using-pdfminer \"Direct link to Using PDFMiner\")\n------------------------------------------------------------------\n\n from langchain_community.document_loaders import PDFMinerLoaderfile_path = ( \"../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf\")loader = PDFMinerLoader(file_path)data = loader.load()\n\n**API Reference:**[PDFMinerLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.PDFMinerLoader.html)\n\n### Using PDFMiner to generate HTML text[​](#using-pdfminer-to-generate-html-text \"Direct link to Using PDFMiner to generate HTML text\")\n\nThis can be helpful for chunking texts semantically into sections as the output html content can be parsed via `BeautifulSoup` to get more structured and rich information about font size, page numbers, PDF headers/footers, etc.\n\n from langchain_community.document_loaders import PDFMinerPDFasHTMLLoaderfile_path = ( 
\"../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf\")loader = PDFMinerPDFasHTMLLoader(file_path)data = loader.load()[0]\n\n**API Reference:**[PDFMinerPDFasHTMLLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.PDFMinerPDFasHTMLLoader.html)\n\n from bs4 import BeautifulSoupsoup = BeautifulSoup(data.page_content, \"html.parser\")content = soup.find_all(\"div\")\n\n import recur_fs = Nonecur_text = \"\"snippets = [] # first collect all snippets that have the same font sizefor c in content: sp = c.find(\"span\") if not sp: continue st = sp.get(\"style\") if not st: continue fs = re.findall(\"font-size:(\\d+)px\", st) if not fs: continue fs = int(fs[0]) if not cur_fs: cur_fs = fs if fs == cur_fs: cur_text += c.text else: snippets.append((cur_text, cur_fs)) cur_fs = fs cur_text = c.textsnippets.append((cur_text, cur_fs))# Note: The above logic is very straightforward. One can also add more strategies such as removing duplicate snippets (as# headers/footers in a PDF appear on multiple pages so if we find duplicates it's safe to assume that it is redundant info)\n\n from langchain_core.documents import Documentcur_idx = -1semantic_snippets = []# Assumption: headings have higher font size than their respective contentfor s in snippets: # if current snippet's font size > previous section's heading => it is a new heading if ( not semantic_snippets or s[1] > semantic_snippets[cur_idx].metadata[\"heading_font\"] ): metadata = {\"heading\": s[0], \"content_font\": 0, \"heading_font\": s[1]} metadata.update(data.metadata) semantic_snippets.append(Document(page_content=\"\", metadata=metadata)) cur_idx += 1 continue # if current snippet's font size <= previous section's content => content belongs to the same section (one can also create # a tree like structure for sub sections if needed but that may require some more thinking and may be data specific) if ( not semantic_snippets[cur_idx].metadata[\"content_font\"] or s[1] <= semantic_snippets[cur_idx].metadata[\"content_font\"] ): semantic_snippets[cur_idx].page_content += s[0] semantic_snippets[cur_idx].metadata[\"content_font\"] = max( s[1], semantic_snippets[cur_idx].metadata[\"content_font\"] ) continue # if current snippet's font size > previous section's content but less than previous section's heading than also make a new # section (e.g. title of a PDF will have the highest font size but we don't want it to subsume all sections) metadata = {\"heading\": s[0], \"content_font\": 0, \"heading_font\": s[1]} metadata.update(data.metadata) semantic_snippets.append(Document(page_content=\"\", metadata=metadata)) cur_idx += 1\n\n**API Reference:**[Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html)\n\n semantic_snippets[4]\n\n Document(page_content='Recently, various DL models and datasets have been developed for layout analysis\\ntasks. The dhSegment [22] utilizes fully convolutional networks [20] for segmen-\\ntation tasks on historical documents. Object detection-based methods like Faster\\nR-CNN [28] and Mask R-CNN [12] are used for identifying document elements [38]\\nand detecting tables [30, 26]. Most recently, Graph Neural Networks [29] have also\\nbeen used in table detection [27]. 
However, these models are usually implemented\\nindividually and there is no unified framework to load and use such models.\\nThere has been a surge of interest in creating open-source tools for document\\nimage processing: a search of document image analysis in Github leads to 5M\\nrelevant code pieces 6; yet most of them rely on traditional rule-based methods\\nor provide limited functionalities. The closest prior research to our work is the\\nOCR-D project7, which also tries to build a complete toolkit for DIA. However,\\nsimilar to the platform developed by Neudecker et al. [21], it is designed for\\nanalyzing historical documents, and provides no supports for recent DL models.\\nThe DocumentLayoutAnalysis project8 focuses on processing born-digital PDF\\ndocuments via analyzing the stored PDF data. Repositories like DeepLayout9\\nand Detectron2-PubLayNet10 are individual deep learning models trained on\\nlayout analysis datasets without support for the full DIA pipeline. The Document\\nAnalysis and Exploitation (DAE) platform [15] and the DeepDIVA project [2]\\naim to improve the reproducibility of DIA methods (or DL models), yet they\\nare not actively maintained. OCR engines like Tesseract [14], easyOCR11 and\\npaddleOCR12 usually do not come with comprehensive functionalities for other\\nDIA tasks like layout analysis.\\nRecent years have also seen numerous efforts to create libraries for promoting\\nreproducibility and reusability in the field of DL. Libraries like Dectectron2 [35],\\n6 The number shown is obtained by specifying the search type as ‘code’.\\n7 https://ocr-d.de/en/about\\n8 https://github.com/BobLd/DocumentLayoutAnalysis\\n9 https://github.com/leonlulu/DeepLayout\\n10 https://github.com/hpanwar08/detectron2\\n11 https://github.com/JaidedAI/EasyOCR\\n12 https://github.com/PaddlePaddle/PaddleOCR\\n4\\nZ. Shen et al.\\nFig. 1: The overall architecture of LayoutParser. For an input document image,\\nthe core LayoutParser library provides a set of off-the-shelf tools for layout\\ndetection, OCR, visualization, and storage, backed by a carefully designed layout\\ndata structure. LayoutParser also supports high level customization via efficient\\nlayout annotation and model training functions. These improve model accuracy\\non the target samples. The community platform enables the easy sharing of DIA\\nmodels and whole digitization pipelines to promote reusability and reproducibility.\\nA collection of detailed documentation, tutorials and exemplar projects make\\nLayoutParser easy to learn and use.\\nAllenNLP [8] and transformers [34] have provided the community with complete\\nDL-based support for developing and deploying models for general computer\\nvision and natural language processing problems. LayoutParser, on the other\\nhand, specializes specifically in DIA tasks. LayoutParser is also equipped with a\\ncommunity platform inspired by established model hubs such as Torch Hub [23]\\nand TensorFlow Hub [1]. It enables the sharing of pretrained models as well as\\nfull document processing pipelines that are unique to DIA tasks.\\nThere have been a variety of document data collections to facilitate the\\ndevelopment of DL models. Some examples include PRImA [3](magazine layouts),\\nPubLayNet [38](academic paper layouts), Table Bank [18](tables in academic\\npapers), Newspaper Navigator Dataset [16, 17](newspaper figure layouts) and\\nHJDataset [31](historical Japanese document layouts). 
A spectrum of models\\ntrained on these datasets are currently available in the LayoutParser model zoo\\nto support different use cases.\\n', metadata={'heading': '2 Related Work\\n', 'content_font': 9, 'heading_font': 11, 'source': '../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf'})\n\nPyPDF Directory[​](#pypdf-directory \"Direct link to PyPDF Directory\")\n---------------------------------------------------------------------\n\nLoad PDFs from a directory:\n\n from langchain_community.document_loaders import PyPDFDirectoryLoader\n\n**API Reference:**[PyPDFDirectoryLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.PyPDFDirectoryLoader.html)\n\n directory_path = \"../../../docs/integrations/document_loaders/example_data/\"loader = PyPDFDirectoryLoader(\"example_data/\")docs = loader.load()\n\nUsing PDFPlumber[​](#using-pdfplumber \"Direct link to Using PDFPlumber\")\n------------------------------------------------------------------------\n\nLike PyMuPDF, the output Documents contain detailed metadata about the PDF and its pages, and the loader returns one document per page.\n\n from langchain_community.document_loaders import PDFPlumberLoaderloader = PDFPlumberLoader(\"example_data/layout-parser-paper.pdf\")data = loader.load()data[0]\n\n**API Reference:**[PDFPlumberLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.PDFPlumberLoader.html)\n\nUsing AmazonTextractPDFParser[​](#using-amazontextractpdfparser \"Direct link to Using AmazonTextractPDFParser\")\n---------------------------------------------------------------------------------------------------------------\n\nThe AmazonTextractPDFLoader calls the [Amazon Textract Service](https://aws.amazon.com/textract/) to convert PDFs into a Document structure. The loader does pure OCR at the moment, with more features like layout support planned, depending on demand. Single- and multi-page documents are supported, up to 3000 pages and 512 MB in size.\n\nFor the call to be successful, an AWS account is required, similar to the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html) requirements.\n\nBesides the AWS configuration, it is very similar to the other PDF loaders, while also supporting JPEG, PNG, TIFF and non-native PDF formats.\n\n from langchain_community.document_loaders import AmazonTextractPDFLoaderloader = AmazonTextractPDFLoader(\"example_data/alejandro_rosalez_sample-small.jpeg\")documents = loader.load()\n\n**API Reference:**[AmazonTextractPDFLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.AmazonTextractPDFLoader.html)\n\nUsing AzureAIDocumentIntelligenceLoader[​](#using-azureaidocumentintelligenceloader \"Direct link to Using AzureAIDocumentIntelligenceLoader\")\n---------------------------------------------------------------------------------------------------------------------------------------------\n\n[Azure AI Document Intelligence](https://aka.ms/doc-intelligence) (formerly known as `Azure Form Recognizer`) is a machine-learning-based service that extracts texts (including handwriting), tables, document structures (e.g., titles, section headings, etc.) and key-value-pairs from digital or scanned PDFs, images, Office and HTML files. 
Document Intelligence supports `PDF`, `JPEG/JPG`, `PNG`, `BMP`, `TIFF`, `HEIF`, `DOCX`, `XLSX`, `PPTX` and `HTML`.\n\nThis [current implementation](https://aka.ms/di-langchain) of a loader using `Document Intelligence` can incorporate content page-wise and turn it into LangChain documents. The default output format is markdown, which can be easily chained with `MarkdownHeaderTextSplitter` for semantic document chunking. You can also use `mode=\"single\"` or `mode=\"page\"` to return pure texts in a single page or document split by page.\n\n### Prerequisite[​](#prerequisite \"Direct link to Prerequisite\")\n\nAn Azure AI Document Intelligence resource in one of the 3 preview regions: **East US**, **West US2**, **West Europe** - follow [this document](https://learn.microsoft.com/azure/ai-services/document-intelligence/create-document-intelligence-resource?view=doc-intel-4.0.0) to create one if you don't have. You will be passing `` and `` as parameters to the loader.\n\n %pip install --upgrade --quiet langchain langchain-community azure-ai-documentintelligence\n\n from langchain_community.document_loaders import AzureAIDocumentIntelligenceLoaderfile_path = \"\"endpoint = \"\"key = \"\"loader = AzureAIDocumentIntelligenceLoader( api_endpoint=endpoint, api_key=key, file_path=file_path, api_model=\"prebuilt-layout\")documents = loader.load()\n\n**API Reference:**[AzureAIDocumentIntelligenceLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.doc_intelligence.AzureAIDocumentIntelligenceLoader.html)\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/document_loader_pdf.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to load Microsoft Office files\n\n](/v0.2/docs/how_to/document_loader_office_file/)[\n\nNext\n\nHow to create a dynamic (self-constructing) chain\n\n](/v0.2/docs/how_to/dynamic_chain/)\n\n* [Using PyPDF](#using-pypdf)\n * [Vector search over PDFs](#vector-search-over-pdfs)\n * [Extract text from images](#extract-text-from-images)\n* [Using PyMuPDF](#using-pymupdf)\n* [Using MathPix](#using-mathpix)\n* [Using Unstructured](#using-unstructured)\n * [Retain Elements](#retain-elements)\n * [Fetching remote PDFs using Unstructured](#fetching-remote-pdfs-using-unstructured)\n* [Using PyPDFium2](#using-pypdfium2)\n* [Using PDFMiner](#using-pdfminer)\n * [Using PDFMiner to generate HTML text](#using-pdfminer-to-generate-html-text)\n* [PyPDF Directory](#pypdf-directory)\n* [Using PDFPlumber](#using-pdfplumber)\n* [Using AmazonTextractPDFParser](#using-amazontextractpdfparser)\n* [Using AzureAIDocumentIntelligenceLoader](#using-azureaidocumentintelligenceloader)\n * [Prerequisite](#prerequisite)"},"last_modified":{"kind":"null"}}},{"rowIdx":1355,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/document_loader_office_file/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to load Microsoft Office files\n\nOn this page\n\nHow to load Microsoft Office files\n==================================\n\nThe [Microsoft Office](https://www.office.com/) suite of productivity software includes Microsoft Word, Microsoft Excel, Microsoft 
PowerPoint, Microsoft Outlook, and Microsoft OneNote. It is available for Microsoft Windows and macOS operating systems. It is also available on Android and iOS.\n\nThis covers how to load commonly used file formats including `DOCX`, `XLSX` and `PPTX` documents into a LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) object that we can use downstream.\n\nLoading DOCX, XLSX, PPTX with AzureAIDocumentIntelligenceLoader[​](#loading-docx-xlsx-pptx-with-azureaidocumentintelligenceloader \"Direct link to Loading DOCX, XLSX, PPTX with AzureAIDocumentIntelligenceLoader\")\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n[Azure AI Document Intelligence](https://aka.ms/doc-intelligence) (formerly known as `Azure Form Recognizer`) is a machine-learning-based service that extracts texts (including handwriting), tables, document structures (e.g., titles, section headings, etc.) and key-value-pairs from digital or scanned PDFs, images, Office and HTML files. Document Intelligence supports `PDF`, `JPEG/JPG`, `PNG`, `BMP`, `TIFF`, `HEIF`, `DOCX`, `XLSX`, `PPTX` and `HTML`.\n\nThis [current implementation](https://aka.ms/di-langchain) of a loader using `Document Intelligence` can incorporate content page-wise and turn it into LangChain documents. The default output format is markdown, which can be easily chained with `MarkdownHeaderTextSplitter` for semantic document chunking. You can also use `mode=\"single\"` or `mode=\"page\"` to return pure texts in a single page or document split by page.\n\n### Prerequisite[​](#prerequisite \"Direct link to Prerequisite\")\n\nAn Azure AI Document Intelligence resource in one of the 3 preview regions: **East US**, **West US2**, **West Europe** - follow [this document](https://learn.microsoft.com/azure/ai-services/document-intelligence/create-document-intelligence-resource?view=doc-intel-4.0.0) to create one if you don't have one. 
You will be passing `` and `` as parameters to the loader.\n\n %pip install --upgrade --quiet langchain langchain-community azure-ai-documentintelligencefrom langchain_community.document_loaders import AzureAIDocumentIntelligenceLoaderfile_path = \"\"endpoint = \"\"key = \"\"loader = AzureAIDocumentIntelligenceLoader( api_endpoint=endpoint, api_key=key, file_path=file_path, api_model=\"prebuilt-layout\")documents = loader.load()\n\n**API Reference:**[AzureAIDocumentIntelligenceLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.doc_intelligence.AzureAIDocumentIntelligenceLoader.html)\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/document_loader_office_file.mdx)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to load Markdown\n\n](/v0.2/docs/how_to/document_loader_markdown/)[\n\nNext\n\nHow to load PDFs\n\n](/v0.2/docs/how_to/document_loader_pdf/)\n\n* [Loading DOCX, XLSX, PPTX with AzureAIDocumentIntelligenceLoader](#loading-docx-xlsx-pptx-with-azureaidocumentintelligenceloader)\n * [Prerequisite](#prerequisite)"},"last_modified":{"kind":"null"}}},{"rowIdx":1356,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/dynamic_chain/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to create a dynamic (self-constructing) chain\n\nHow to create a dynamic (self-constructing) chain\n=================================================\n\nPrerequisites\n\nThis guide assumes familiarity with the following:\n\n* [LangChain Expression Language (LCEL)](/v0.2/docs/concepts/#langchain-expression-language)\n* [How to turn any function into a runnable](/v0.2/docs/how_to/functions/)\n\nSometimes we want to construct parts of a chain at runtime, depending on the chain inputs ([routing](/v0.2/docs/how_to/routing/) is the most common example of this). We can create dynamic chains like this using a very useful property of RunnableLambda's, which is that if a RunnableLambda returns a Runnable, that Runnable is itself invoked. 
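To make this property concrete, here is a minimal, self-contained sketch (not taken from the guide itself; the `to_upper` and `to_lower` runnables are invented purely for illustration):\n\n    from langchain_core.runnables import RunnableLambda\n\n    # Two trivial runnables we might want to choose between at runtime.\n    to_upper = RunnableLambda(lambda text: text.upper())\n    to_lower = RunnableLambda(lambda text: text.lower())\n\n    # This lambda returns a *Runnable* rather than a final value. Because a Runnable\n    # returned from a RunnableLambda is itself invoked (with the same input), the\n    # overall chain still produces a plain string.\n    route = RunnableLambda(lambda text: to_upper if text.startswith(\"!\") else to_lower)\n\n    route.invoke(\"!hello\")  # -> \"!HELLO\"\n    route.invoke(\"hello\")  # -> \"hello\"\n\nThe full example below uses the same mechanism to decide between a contextualization chain and a `RunnablePassthrough`. 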
Let's see an example.\n\n* OpenAI\n* Anthropic\n* Azure\n* Google\n* Cohere\n* FireworksAI\n* Groq\n* MistralAI\n* TogetherAI\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"OPENAI_API_KEY\"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n\n pip install -qU langchain-anthropic\n\n import getpassimport osos.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()from langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass()from langchain_openai import AzureChatOpenAIllm = AzureChatOpenAI( azure_endpoint=os.environ[\"AZURE_OPENAI_ENDPOINT\"], azure_deployment=os.environ[\"AZURE_OPENAI_DEPLOYMENT_NAME\"], openai_api_version=os.environ[\"AZURE_OPENAI_API_VERSION\"],)\n\n pip install -qU langchain-google-vertexai\n\n import getpassimport osos.environ[\"GOOGLE_API_KEY\"] = getpass.getpass()from langchain_google_vertexai import ChatVertexAIllm = ChatVertexAI(model=\"gemini-pro\")\n\n pip install -qU langchain-cohere\n\n import getpassimport osos.environ[\"COHERE_API_KEY\"] = getpass.getpass()from langchain_cohere import ChatCoherellm = ChatCohere(model=\"command-r\")\n\n pip install -qU langchain-fireworks\n\n import getpassimport osos.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass()from langchain_fireworks import ChatFireworksllm = ChatFireworks(model=\"accounts/fireworks/models/mixtral-8x7b-instruct\")\n\n pip install -qU langchain-groq\n\n import getpassimport osos.environ[\"GROQ_API_KEY\"] = getpass.getpass()from langchain_groq import ChatGroqllm = ChatGroq(model=\"llama3-8b-8192\")\n\n pip install -qU langchain-mistralai\n\n import getpassimport osos.environ[\"MISTRAL_API_KEY\"] = getpass.getpass()from langchain_mistralai import ChatMistralAIllm = ChatMistralAI(model=\"mistral-large-latest\")\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"TOGETHER_API_KEY\"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI( base_url=\"https://api.together.xyz/v1\", api_key=os.environ[\"TOGETHER_API_KEY\"], model=\"mistralai/Mixtral-8x7B-Instruct-v0.1\",)\n\n # | echo: falsefrom langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")\n\n**API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html)\n\n from langchain_core.output_parsers import StrOutputParserfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.runnables import Runnable, RunnablePassthrough, chaincontextualize_instructions = \"\"\"Convert the latest user question into a standalone question given the chat history. Don't answer the question, return the question and nothing else (no descriptive text).\"\"\"contextualize_prompt = ChatPromptTemplate.from_messages( [ (\"system\", contextualize_instructions), (\"placeholder\", \"{chat_history}\"), (\"human\", \"{question}\"), ])contextualize_question = contextualize_prompt | llm | StrOutputParser()qa_instructions = ( \"\"\"Answer the user question given the following context:\\n\\n{context}.\"\"\")qa_prompt = ChatPromptTemplate.from_messages( [(\"system\", qa_instructions), (\"human\", \"{question}\")])@chaindef contextualize_if_needed(input_: dict) -> Runnable: if input_.get(\"chat_history\"): # NOTE: This is returning another Runnable, not an actual output. 
return contextualize_question else: return RunnablePassthrough()@chaindef fake_retriever(input_: dict) -> str: return \"egypt's population in 2024 is about 111 million\"full_chain = ( RunnablePassthrough.assign(question=contextualize_if_needed).assign( context=fake_retriever ) | qa_prompt | llm | StrOutputParser())full_chain.invoke( { \"question\": \"what about egypt\", \"chat_history\": [ (\"human\", \"what's the population of indonesia\"), (\"ai\", \"about 276 million\"), ], })\n\n**API Reference:**[StrOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.string.StrOutputParser.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [Runnable](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) | [chain](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.chain.html)\n\n \"According to the context provided, Egypt's population in 2024 is estimated to be about 111 million.\"\n\nThe key here is that `contextualize_if_needed` returns another Runnable and not an actual output. This returned Runnable is itself run when the full chain is executed.\n\nLooking at the trace we can see that, since we passed in chat\\_history, we executed the contextualize\\_question chain as part of the full chain: [https://smith.langchain.com/public/9e0ae34c-4082-4f3f-beed-34a2a2f4c991/r](https://smith.langchain.com/public/9e0ae34c-4082-4f3f-beed-34a2a2f4c991/r)\n\nNote that the streaming, batching, etc. capabilities of the returned Runnable are all preserved\n\n for chunk in contextualize_if_needed.stream( { \"question\": \"what about egypt\", \"chat_history\": [ (\"human\", \"what's the population of indonesia\"), (\"ai\", \"about 276 million\"), ], }): print(chunk)\n\n What is the population of Egypt?\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/dynamic_chain.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to load PDFs\n\n](/v0.2/docs/how_to/document_loader_pdf/)[\n\nNext\n\nText embedding models\n\n](/v0.2/docs/how_to/embed_text/)"},"last_modified":{"kind":"null"}}},{"rowIdx":1357,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/embed_text/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* Text embedding models\n\nOn this page\n\nText embedding models\n=====================\n\ninfo\n\nHead to [Integrations](/v0.2/docs/integrations/text_embedding/) for documentation on built-in integrations with text embedding model providers.\n\nThe Embeddings class is a class designed for interfacing with text embedding models. There are lots of embedding model providers (OpenAI, Cohere, Hugging Face, etc) - this class is designed to provide a standard interface for all of them.\n\nEmbeddings create a vector representation of a piece of text. 
This is useful because it means we can think about text in the vector space, and do things like semantic search where we look for pieces of text that are most similar in the vector space.\n\nThe base Embeddings class in LangChain provides two methods: one for embedding documents and one for embedding a query. The former, `.embed_documents`, takes as input multiple texts, while the latter, `.embed_query`, takes a single text. The reason for having these as two separate methods is that some embedding providers have different embedding methods for documents (to be searched over) vs queries (the search query itself). `.embed_query` will return a list of floats, whereas `.embed_documents` returns a list of lists of floats.\n\nGet started[​](#get-started \"Direct link to Get started\")\n---------------------------------------------------------\n\n### Setup[​](#setup \"Direct link to Setup\")\n\n* OpenAI\n* Cohere\n* Hugging Face\n\nTo start we'll need to install the OpenAI partner package:\n\n pip install langchain-openai\n\nAccessing the API requires an API key, which you can get by creating an account and heading [here](https://platform.openai.com/account/api-keys). Once we have a key we'll want to set it as an environment variable by running:\n\n export OPENAI_API_KEY=\"...\"\n\nIf you'd prefer not to set an environment variable you can pass the key in directly via the `api_key` named parameter when initiating the OpenAI LLM class:\n\n from langchain_openai import OpenAIEmbeddingsembeddings_model = OpenAIEmbeddings(api_key=\"...\")\n\n**API Reference:**[OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html)\n\nOtherwise you can initialize without any params:\n\n from langchain_openai import OpenAIEmbeddingsembeddings_model = OpenAIEmbeddings()\n\n**API Reference:**[OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html)\n\nTo start we'll need to install the Cohere SDK package:\n\n pip install langchain-cohere\n\nAccessing the API requires an API key, which you can get by creating an account and heading [here](https://dashboard.cohere.com/api-keys). 
Once we have a key we'll want to set it as an environment variable by running:\n\n export COHERE_API_KEY=\"...\"\n\nIf you'd prefer not to set an environment variable you can pass the key in directly via the `cohere_api_key` named parameter when initiating the Cohere LLM class:\n\n from langchain_cohere import CohereEmbeddingsembeddings_model = CohereEmbeddings(cohere_api_key=\"...\")\n\n**API Reference:**[CohereEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_cohere.embeddings.CohereEmbeddings.html)\n\nOtherwise you can initialize without any params:\n\n from langchain_cohere import CohereEmbeddingsembeddings_model = CohereEmbeddings()\n\n**API Reference:**[CohereEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_cohere.embeddings.CohereEmbeddings.html)\n\nTo start we'll need to install the Hugging Face partner package:\n\n pip install langchain-huggingface\n\nYou can then load any [Sentence Transformers model](https://huggingface.co/models?library=sentence-transformers) from the Hugging Face Hub.\n\n from langchain_huggingface import HuggingFaceEmbeddingsembeddings_model = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-mpnet-base-v2\")\n\n**API Reference:**[HuggingFaceEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_huggingface.embeddings.huggingface.HuggingFaceEmbeddings.html)\n\nYou can also leave the `model_name` blank to use the default [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) model.\n\n from langchain_huggingface import HuggingFaceEmbeddingsembeddings_model = HuggingFaceEmbeddings()\n\n**API Reference:**[HuggingFaceEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_huggingface.embeddings.huggingface.HuggingFaceEmbeddings.html)\n\n### `embed_documents`[​](#embed_documents \"Direct link to embed_documents\")\n\n#### Embed list of texts[​](#embed-list-of-texts \"Direct link to Embed list of texts\")\n\nUse `.embed_documents` to embed a list of strings, recovering a list of embeddings:\n\n embeddings = embeddings_model.embed_documents( [ \"Hi there!\", \"Oh, hello!\", \"What's your name?\", \"My friends call me World\", \"Hello World!\" ])len(embeddings), len(embeddings[0])\n\n (5, 1536)\n\n### `embed_query`[​](#embed_query \"Direct link to embed_query\")\n\n#### Embed single query[​](#embed-single-query \"Direct link to Embed single query\")\n\nUse `.embed_query` to embed a single piece of text (e.g., for the purpose of comparing to other embedded pieces of texts).\n\n embedded_query = embeddings_model.embed_query(\"What was the name mentioned in the conversation?\")embedded_query[:5]\n\n [0.0053587136790156364, -0.0004999046213924885, 0.038883671164512634, -0.003001077566295862, -0.00900818221271038]\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/embed_text.mdx)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to create a dynamic (self-constructing) chain\n\n](/v0.2/docs/how_to/dynamic_chain/)[\n\nNext\n\nHow to combine results from multiple retrievers\n\n](/v0.2/docs/how_to/ensemble_retriever/)\n\n* [Get started](#get-started)\n * [Setup](#setup)\n * 
[`embed_documents`](#embed_documents)\n * [`embed_query`](#embed_query)"},"last_modified":{"kind":"null"}}},{"rowIdx":1358,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/ensemble_retriever/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to combine results from multiple retrievers\n\nOn this page\n\nHow to combine results from multiple retrievers\n===============================================\n\nThe [EnsembleRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.ensemble.EnsembleRetriever.html) supports ensembling of results from multiple retrievers. It is initialized with a list of [BaseRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain_core.retrievers.BaseRetriever.html) objects. EnsembleRetrievers rerank the results of the constituent retrievers based on the [Reciprocal Rank Fusion](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf) algorithm.\n\nBy leveraging the strengths of different algorithms, the `EnsembleRetriever` can achieve better performance than any single algorithm.\n\nThe most common pattern is to combine a sparse retriever (like BM25) with a dense retriever (like embedding similarity), because their strengths are complementary. It is also known as \"hybrid search\". The sparse retriever is good at finding relevant documents based on keywords, while the dense retriever is good at finding relevant documents based on semantic similarity.\n\nBasic usage[​](#basic-usage \"Direct link to Basic usage\")\n---------------------------------------------------------\n\nBelow we demonstrate ensembling of a [BM25Retriever](https://api.python.langchain.com/en/latest/retrievers/langchain_community.retrievers.bm25.BM25Retriever.html) with a retriever derived from the [FAISS vector store](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html).\n\n %pip install --upgrade --quiet rank_bm25 > /dev/null\n\n from langchain.retrievers import EnsembleRetrieverfrom langchain_community.retrievers import BM25Retrieverfrom langchain_community.vectorstores import FAISSfrom langchain_openai import OpenAIEmbeddingsdoc_list_1 = [ \"I like apples\", \"I like oranges\", \"Apples and oranges are fruits\",]# initialize the bm25 retriever and faiss retrieverbm25_retriever = BM25Retriever.from_texts( doc_list_1, metadatas=[{\"source\": 1}] * len(doc_list_1))bm25_retriever.k = 2doc_list_2 = [ \"You like apples\", \"You like oranges\",]embedding = OpenAIEmbeddings()faiss_vectorstore = FAISS.from_texts( doc_list_2, embedding, metadatas=[{\"source\": 2}] * len(doc_list_2))faiss_retriever = faiss_vectorstore.as_retriever(search_kwargs={\"k\": 2})# initialize the ensemble retrieverensemble_retriever = EnsembleRetriever( retrievers=[bm25_retriever, faiss_retriever], weights=[0.5, 0.5])\n\n**API Reference:**[EnsembleRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.ensemble.EnsembleRetriever.html) | [BM25Retriever](https://api.python.langchain.com/en/latest/retrievers/langchain_community.retrievers.bm25.BM25Retriever.html) | [FAISS](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html)\n\n docs = ensemble_retriever.invoke(\"apples\")docs\n\n [Document(page_content='I like apples', 
metadata={'source': 1}), Document(page_content='You like apples', metadata={'source': 2}), Document(page_content='Apples and oranges are fruits', metadata={'source': 1}), Document(page_content='You like oranges', metadata={'source': 2})]\n\nRuntime Configuration[​](#runtime-configuration \"Direct link to Runtime Configuration\")\n---------------------------------------------------------------------------------------\n\nWe can also configure the individual retrievers at runtime using [configurable fields](/v0.2/docs/how_to/configure/). Below we update the \"top-k\" parameter for the FAISS retriever specifically:\n\n from langchain_core.runnables import ConfigurableFieldfaiss_retriever = faiss_vectorstore.as_retriever( search_kwargs={\"k\": 2}).configurable_fields( search_kwargs=ConfigurableField( id=\"search_kwargs_faiss\", name=\"Search Kwargs\", description=\"The search kwargs to use\", ))ensemble_retriever = EnsembleRetriever( retrievers=[bm25_retriever, faiss_retriever], weights=[0.5, 0.5])\n\n**API Reference:**[ConfigurableField](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.utils.ConfigurableField.html)\n\n config = {\"configurable\": {\"search_kwargs_faiss\": {\"k\": 1}}}docs = ensemble_retriever.invoke(\"apples\", config=config)docs\n\n [Document(page_content='I like apples', metadata={'source': 1}), Document(page_content='You like apples', metadata={'source': 2}), Document(page_content='Apples and oranges are fruits', metadata={'source': 1})]\n\nNotice that this only returns one source from the FAISS retriever, because we pass in the relevant configuration at run time\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/ensemble_retriever.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nText embedding models\n\n](/v0.2/docs/how_to/embed_text/)[\n\nNext\n\nHow to select examples by length\n\n](/v0.2/docs/how_to/example_selectors_length_based/)\n\n* [Basic usage](#basic-usage)\n* [Runtime Configuration](#runtime-configuration)"},"last_modified":{"kind":"null"}}},{"rowIdx":1359,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/example_selectors_mmr/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to select examples by maximal marginal relevance (MMR)\n\nHow to select examples by maximal marginal relevance (MMR)\n==========================================================\n\nThe `MaxMarginalRelevanceExampleSelector` selects examples based on a combination of which examples are most similar to the inputs, while also optimizing for diversity. 
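Concretely, the trade-off can be sketched as a greedy selection loop (an illustrative approximation, not the library's internal implementation; `mmr_select` and its parameters are made up for this sketch, and it assumes unit-normalized embedding vectors so that dot products are cosine similarities):\n\n    import numpy as np\n\n    def mmr_select(query_vec, example_vecs, k=2, lambda_mult=0.5):\n        # Greedily pick k examples, trading off relevance to the query against\n        # redundancy with the examples that have already been picked.\n        selected = []\n        candidates = list(range(len(example_vecs)))\n        while candidates and len(selected) < k:\n            best, best_score = None, float(\"-inf\")\n            for i in candidates:\n                relevance = float(np.dot(query_vec, example_vecs[i]))\n                redundancy = max(\n                    (float(np.dot(example_vecs[i], example_vecs[j])) for j in selected),\n                    default=0.0,\n                )\n                score = lambda_mult * relevance - (1 - lambda_mult) * redundancy\n                if score > best_score:\n                    best, best_score = i, score\n            selected.append(best)\n            candidates.remove(best)\n        return selected\n\n`MaxMarginalRelevanceExampleSelector` applies this kind of criterion to the embedded few-shot examples. 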
It does this by finding the examples with the embeddings that have the greatest cosine similarity with the inputs, and then iteratively adding them while penalizing them for closeness to already selected examples.\n\n from langchain_community.vectorstores import FAISSfrom langchain_core.example_selectors import ( MaxMarginalRelevanceExampleSelector, SemanticSimilarityExampleSelector,)from langchain_core.prompts import FewShotPromptTemplate, PromptTemplatefrom langchain_openai import OpenAIEmbeddingsexample_prompt = PromptTemplate( input_variables=[\"input\", \"output\"], template=\"Input: {input}\\nOutput: {output}\",)# Examples of a pretend task of creating antonyms.examples = [ {\"input\": \"happy\", \"output\": \"sad\"}, {\"input\": \"tall\", \"output\": \"short\"}, {\"input\": \"energetic\", \"output\": \"lethargic\"}, {\"input\": \"sunny\", \"output\": \"gloomy\"}, {\"input\": \"windy\", \"output\": \"calm\"},]\n\n**API Reference:**[FAISS](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html) | [MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html) | [SemanticSimilarityExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.SemanticSimilarityExampleSelector.html) | [FewShotPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.few_shot.FewShotPromptTemplate.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html)\n\n example_selector = MaxMarginalRelevanceExampleSelector.from_examples( # The list of examples available to select from. examples, # The embedding class used to produce embeddings which are used to measure semantic similarity. OpenAIEmbeddings(), # The VectorStore class that is used to store the embeddings and do a similarity search over. FAISS, # The number of examples to produce. k=2,)mmr_prompt = FewShotPromptTemplate( # We provide an ExampleSelector instead of examples. example_selector=example_selector, example_prompt=example_prompt, prefix=\"Give the antonym of every input\", suffix=\"Input: {adjective}\\nOutput:\", input_variables=[\"adjective\"],)\n\n # Input is a feeling, so should select the happy/sad example as the first oneprint(mmr_prompt.format(adjective=\"worried\"))\n\n Give the antonym of every inputInput: happyOutput: sadInput: windyOutput: calmInput: worriedOutput:\n\n # Let's compare this to what we would just get if we went solely off of similarity,# by using SemanticSimilarityExampleSelector instead of MaxMarginalRelevanceExampleSelector.example_selector = SemanticSimilarityExampleSelector.from_examples( # The list of examples available to select from. examples, # The embedding class used to produce embeddings which are used to measure semantic similarity. OpenAIEmbeddings(), # The VectorStore class that is used to store the embeddings and do a similarity search over. FAISS, # The number of examples to produce. k=2,)similar_prompt = FewShotPromptTemplate( # We provide an ExampleSelector instead of examples. 
example_selector=example_selector, example_prompt=example_prompt, prefix=\"Give the antonym of every input\", suffix=\"Input: {adjective}\\nOutput:\", input_variables=[\"adjective\"],)print(similar_prompt.format(adjective=\"worried\"))\n\n Give the antonym of every inputInput: happyOutput: sadInput: sunnyOutput: gloomyInput: worriedOutput:\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/example_selectors_mmr.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to select examples by length\n\n](/v0.2/docs/how_to/example_selectors_length_based/)[\n\nNext\n\nHow to select examples by n-gram overlap\n\n](/v0.2/docs/how_to/example_selectors_ngram/)"},"last_modified":{"kind":"null"}}},{"rowIdx":1360,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/example_selectors_length_based/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to select examples by length\n\nHow to select examples by length\n================================\n\nThis example selector selects which examples to use based on length. This is useful when you are worried about constructing a prompt that will go over the length of the context window. For longer inputs, it will select fewer examples to include, while for shorter inputs it will select more.\n\n from langchain_core.example_selectors import LengthBasedExampleSelectorfrom langchain_core.prompts import FewShotPromptTemplate, PromptTemplate# Examples of a pretend task of creating antonyms.examples = [ {\"input\": \"happy\", \"output\": \"sad\"}, {\"input\": \"tall\", \"output\": \"short\"}, {\"input\": \"energetic\", \"output\": \"lethargic\"}, {\"input\": \"sunny\", \"output\": \"gloomy\"}, {\"input\": \"windy\", \"output\": \"calm\"},]example_prompt = PromptTemplate( input_variables=[\"input\", \"output\"], template=\"Input: {input}\\nOutput: {output}\",)example_selector = LengthBasedExampleSelector( # The examples it has available to choose from. examples=examples, # The PromptTemplate being used to format the examples. example_prompt=example_prompt, # The maximum length that the formatted examples should be. # Length is measured by the get_text_length function below. max_length=25, # The function used to get the length of a string, which is used # to determine which examples to include. It is commented out because # it is provided as a default value if none is specified. # get_text_length: Callable[[str], int] = lambda x: len(re.split(\"\\n| \", x)))dynamic_prompt = FewShotPromptTemplate( # We provide an ExampleSelector instead of examples. 
example_selector=example_selector, example_prompt=example_prompt, prefix=\"Give the antonym of every input\", suffix=\"Input: {adjective}\\nOutput:\", input_variables=[\"adjective\"],)\n\n**API Reference:**[LengthBasedExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.length_based.LengthBasedExampleSelector.html) | [FewShotPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.few_shot.FewShotPromptTemplate.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html)\n\n # An example with small input, so it selects all examples.print(dynamic_prompt.format(adjective=\"big\"))\n\n Give the antonym of every inputInput: happyOutput: sadInput: tallOutput: shortInput: energeticOutput: lethargicInput: sunnyOutput: gloomyInput: windyOutput: calmInput: bigOutput:\n\n # An example with long input, so it selects only one example.long_string = \"big and huge and massive and large and gigantic and tall and much much much much much bigger than everything else\"print(dynamic_prompt.format(adjective=long_string))\n\n Give the antonym of every inputInput: happyOutput: sadInput: big and huge and massive and large and gigantic and tall and much much much much much bigger than everything elseOutput:\n\n # You can add an example to an example selector as well.new_example = {\"input\": \"big\", \"output\": \"small\"}dynamic_prompt.example_selector.add_example(new_example)print(dynamic_prompt.format(adjective=\"enthusiastic\"))\n\n Give the antonym of every inputInput: happyOutput: sadInput: tallOutput: shortInput: energeticOutput: lethargicInput: sunnyOutput: gloomyInput: windyOutput: calmInput: bigOutput: smallInput: enthusiasticOutput:\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/example_selectors_length_based.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to combine results from multiple retrievers\n\n](/v0.2/docs/how_to/ensemble_retriever/)[\n\nNext\n\nHow to select examples by maximal marginal relevance (MMR)\n\n](/v0.2/docs/how_to/example_selectors_mmr/)"},"last_modified":{"kind":"null"}}},{"rowIdx":1361,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/example_selectors_similarity/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to select examples by similarity\n\nHow to select examples by similarity\n====================================\n\nThis object selects examples based on similarity to the inputs. 
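The notion of similarity here is cosine similarity between embedding vectors. As a quick, plain-Python refresher on that measure (illustrative only — in practice the embeddings come from the embedding model you pass in, and the comparison happens inside the vector store):

```python
import math

def cosine_similarity(a: list[float], b: list[float]) -> float:
    """Cosine of the angle between two vectors: 1.0 means identical direction."""
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(x * x for x in b))
    return dot / (norm_a * norm_b)

# Toy 3-dimensional "embeddings"; real embeddings have hundreds of dimensions.
print(cosine_similarity([1.0, 0.0, 1.0], [0.5, 0.5, 1.0]))  # closer to 1.0 = more similar
print(cosine_similarity([1.0, 0.0, 0.0], [0.0, 1.0, 0.0]))  # orthogonal vectors -> 0.0
```

With that measure in mind, back to the selector itself: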
It does this by finding the examples with the embeddings that have the greatest cosine similarity with the inputs.\n\n from langchain_chroma import Chromafrom langchain_core.example_selectors import SemanticSimilarityExampleSelectorfrom langchain_core.prompts import FewShotPromptTemplate, PromptTemplatefrom langchain_openai import OpenAIEmbeddingsexample_prompt = PromptTemplate( input_variables=[\"input\", \"output\"], template=\"Input: {input}\\nOutput: {output}\",)# Examples of a pretend task of creating antonyms.examples = [ {\"input\": \"happy\", \"output\": \"sad\"}, {\"input\": \"tall\", \"output\": \"short\"}, {\"input\": \"energetic\", \"output\": \"lethargic\"}, {\"input\": \"sunny\", \"output\": \"gloomy\"}, {\"input\": \"windy\", \"output\": \"calm\"},]\n\n**API Reference:**[SemanticSimilarityExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.SemanticSimilarityExampleSelector.html) | [FewShotPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.few_shot.FewShotPromptTemplate.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html)\n\n example_selector = SemanticSimilarityExampleSelector.from_examples( # The list of examples available to select from. examples, # The embedding class used to produce embeddings which are used to measure semantic similarity. OpenAIEmbeddings(), # The VectorStore class that is used to store the embeddings and do a similarity search over. Chroma, # The number of examples to produce. k=1,)similar_prompt = FewShotPromptTemplate( # We provide an ExampleSelector instead of examples. 
example_selector=example_selector, example_prompt=example_prompt, prefix=\"Give the antonym of every input\", suffix=\"Input: {adjective}\\nOutput:\", input_variables=[\"adjective\"],)\n\n # Input is a feeling, so should select the happy/sad exampleprint(similar_prompt.format(adjective=\"worried\"))\n\n Give the antonym of every inputInput: happyOutput: sadInput: worriedOutput:\n\n # Input is a measurement, so should select the tall/short exampleprint(similar_prompt.format(adjective=\"large\"))\n\n Give the antonym of every inputInput: tallOutput: shortInput: largeOutput:\n\n # You can add new examples to the SemanticSimilarityExampleSelector as wellsimilar_prompt.example_selector.add_example( {\"input\": \"enthusiastic\", \"output\": \"apathetic\"})print(similar_prompt.format(adjective=\"passionate\"))\n\n Give the antonym of every inputInput: enthusiasticOutput: apatheticInput: passionateOutput:\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/example_selectors_similarity.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to select examples by n-gram overlap\n\n](/v0.2/docs/how_to/example_selectors_ngram/)[\n\nNext\n\nHow to use reference examples when doing extraction\n\n](/v0.2/docs/how_to/extraction_examples/)"},"last_modified":{"kind":"null"}}},{"rowIdx":1362,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/example_selectors_ngram/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to select examples by n-gram overlap\n\nHow to select examples by n-gram overlap\n========================================\n\nThe `NGramOverlapExampleSelector` selects and orders examples based on which examples are most similar to the input, according to an ngram overlap score. The ngram overlap score is a float between 0.0 and 1.0, inclusive.\n\nThe selector allows for a threshold score to be set. Examples with an ngram overlap score less than or equal to the threshold are excluded. The threshold is set to -1.0, by default, so will not exclude any examples, only reorder them. 
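To make "ngram overlap score" concrete before looking at the remaining threshold settings, here is a small, purely illustrative sketch in plain Python. It is not the library's actual scoring code (that lives in `langchain_community`), but it captures the intuition: count word n-grams shared between the input and an example, normalized into the 0.0–1.0 range.

```python
def ngram_overlap(input_text: str, example_text: str, n: int = 2) -> float:
    """Illustrative only: fraction of the input's n-grams that also appear in the example."""

    def ngrams(text: str) -> set:
        tokens = text.lower().replace(".", "").split()
        return {tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1)}

    input_ngrams, example_ngrams = ngrams(input_text), ngrams(example_text)
    if not input_ngrams or not example_ngrams:
        return 0.0
    return len(input_ngrams & example_ngrams) / len(input_ngrams)

print(ngram_overlap("Spot can run fast.", "Spot can run."))  # shared bigrams -> positive score
print(ngram_overlap("Spot can run fast.", "My dog barks."))  # no shared bigrams -> 0.0
```

With that picture of the score in mind, the threshold behaviors follow.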
Setting the threshold to 0.0 will exclude examples that have no ngram overlaps with the input.\n\n from langchain_community.example_selectors import NGramOverlapExampleSelectorfrom langchain_core.prompts import FewShotPromptTemplate, PromptTemplateexample_prompt = PromptTemplate( input_variables=[\"input\", \"output\"], template=\"Input: {input}\\nOutput: {output}\",)# Examples of a fictional translation task.examples = [ {\"input\": \"See Spot run.\", \"output\": \"Ver correr a Spot.\"}, {\"input\": \"My dog barks.\", \"output\": \"Mi perro ladra.\"}, {\"input\": \"Spot can run.\", \"output\": \"Spot puede correr.\"},]\n\n**API Reference:**[NGramOverlapExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_community.example_selectors.ngram_overlap.NGramOverlapExampleSelector.html) | [FewShotPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.few_shot.FewShotPromptTemplate.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html)\n\n example_selector = NGramOverlapExampleSelector( # The examples it has available to choose from. examples=examples, # The PromptTemplate being used to format the examples. example_prompt=example_prompt, # The threshold, at which selector stops. # It is set to -1.0 by default. threshold=-1.0, # For negative threshold: # Selector sorts examples by ngram overlap score, and excludes none. # For threshold greater than 1.0: # Selector excludes all examples, and returns an empty list. # For threshold equal to 0.0: # Selector sorts examples by ngram overlap score, # and excludes those with no ngram overlap with input.)dynamic_prompt = FewShotPromptTemplate( # We provide an ExampleSelector instead of examples. 
example_selector=example_selector, example_prompt=example_prompt, prefix=\"Give the Spanish translation of every input\", suffix=\"Input: {sentence}\\nOutput:\", input_variables=[\"sentence\"],)\n\n # An example input with large ngram overlap with \"Spot can run.\"# and no overlap with \"My dog barks.\"print(dynamic_prompt.format(sentence=\"Spot can run fast.\"))\n\n Give the Spanish translation of every inputInput: Spot can run.Output: Spot puede correr.Input: See Spot run.Output: Ver correr a Spot.Input: My dog barks.Output: Mi perro ladra.Input: Spot can run fast.Output:\n\n # You can add examples to NGramOverlapExampleSelector as well.new_example = {\"input\": \"Spot plays fetch.\", \"output\": \"Spot juega a buscar.\"}example_selector.add_example(new_example)print(dynamic_prompt.format(sentence=\"Spot can run fast.\"))\n\n Give the Spanish translation of every inputInput: Spot can run.Output: Spot puede correr.Input: See Spot run.Output: Ver correr a Spot.Input: Spot plays fetch.Output: Spot juega a buscar.Input: My dog barks.Output: Mi perro ladra.Input: Spot can run fast.Output:\n\n # You can set a threshold at which examples are excluded.# For example, setting threshold equal to 0.0# excludes examples with no ngram overlaps with input.# Since \"My dog barks.\" has no ngram overlaps with \"Spot can run fast.\"# it is excluded.example_selector.threshold = 0.0print(dynamic_prompt.format(sentence=\"Spot can run fast.\"))\n\n Give the Spanish translation of every inputInput: Spot can run.Output: Spot puede correr.Input: See Spot run.Output: Ver correr a Spot.Input: Spot plays fetch.Output: Spot juega a buscar.Input: Spot can run fast.Output:\n\n # Setting small nonzero thresholdexample_selector.threshold = 0.09print(dynamic_prompt.format(sentence=\"Spot can play fetch.\"))\n\n Give the Spanish translation of every inputInput: Spot can run.Output: Spot puede correr.Input: Spot plays fetch.Output: Spot juega a buscar.Input: Spot can play fetch.Output:\n\n # Setting threshold greater than 1.0example_selector.threshold = 1.0 + 1e-9print(dynamic_prompt.format(sentence=\"Spot can play fetch.\"))\n\n Give the Spanish translation of every inputInput: Spot can play fetch.Output:\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/example_selectors_ngram.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to select examples by maximal marginal relevance (MMR)\n\n](/v0.2/docs/how_to/example_selectors_mmr/)[\n\nNext\n\nHow to select examples by similarity\n\n](/v0.2/docs/how_to/example_selectors_similarity/)"},"last_modified":{"kind":"null"}}},{"rowIdx":1363,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/extraction_examples/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to use reference examples when doing extraction\n\nOn this page\n\nHow to use reference examples when doing extraction\n===================================================\n\nThe quality of extractions can often be improved by providing reference examples to the LLM.\n\nData extraction attempts to generate structured representations of information found in text and other unstructured or semi-structured formats. 
[Tool-calling](/v0.2/docs/concepts/#functiontool-calling) LLM features are often used in this context. This guide demonstrates how to build few-shot examples of tool calls to help steer the behavior of extraction and similar applications.\n\ntip\n\nWhile this guide focuses on how to use examples with a tool-calling model, the technique is generally applicable and will also work with JSON mode or prompt-based techniques.\n\nLangChain implements a [tool-call attribute](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.tool_calls) on messages from LLMs that include tool calls. See our [how-to guide on tool calling](/v0.2/docs/how_to/tool_calling/) for more detail. To build reference examples for data extraction, we build a chat history containing a sequence of:\n\n* [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) containing example inputs;\n* [AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) containing example tool calls;\n* [ToolMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessage.html) containing example tool outputs.\n\nLangChain adopts this convention for structuring tool calls into a conversation across LLM providers.\n\nFirst we build a prompt template that includes a placeholder for these messages:\n\n from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder# Define a custom prompt to provide instructions and any additional context.# 1) You can add examples into the prompt template to improve extraction quality# 2) Introduce additional parameters to take context into account (e.g., include metadata# about the document from which the text was extracted.)prompt = ChatPromptTemplate.from_messages( [ ( \"system\", \"You are an expert extraction algorithm. \" \"Only extract relevant information from the text. \" \"If you do not know the value of an attribute asked \" \"to extract, return null for the attribute's value.\", ), # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓ MessagesPlaceholder(\"examples\"), # <-- EXAMPLES! # ↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑ (\"human\", \"{text}\"), ])\n\n**API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [MessagesPlaceholder](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.MessagesPlaceholder.html)\n\nTest out the template:\n\n from langchain_core.messages import ( HumanMessage,)prompt.invoke( {\"text\": \"this is some text\", \"examples\": [HumanMessage(content=\"testing 1 2 3\")]})\n\n**API Reference:**[HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html)\n\n ChatPromptValue(messages=[SystemMessage(content=\"You are an expert extraction algorithm. Only extract relevant information from the text.
If you do not know the value of an attribute asked to extract, return null for the attribute's value.\"), HumanMessage(content='testing 1 2 3'), HumanMessage(content='this is some text')])\n\nDefine the schema[​](#define-the-schema \"Direct link to Define the schema\")\n---------------------------------------------------------------------------\n\nLet's re-use the person schema from the [extraction tutorial](/v0.2/docs/tutorials/extraction/).\n\n from typing import List, Optionalfrom langchain_core.pydantic_v1 import BaseModel, Fieldfrom langchain_openai import ChatOpenAIclass Person(BaseModel): \"\"\"Information about a person.\"\"\" # ^ Doc-string for the entity Person. # This doc-string is sent to the LLM as the description of the schema Person, # and it can help to improve extraction results. # Note that: # 1. Each field is an `optional` -- this allows the model to decline to extract it! # 2. Each field has a `description` -- this description is used by the LLM. # Having a good description can help improve extraction results. name: Optional[str] = Field(..., description=\"The name of the person\") hair_color: Optional[str] = Field( ..., description=\"The color of the person's hair if known\" ) height_in_meters: Optional[str] = Field(..., description=\"Height in METERs\")class Data(BaseModel): \"\"\"Extracted data about people.\"\"\" # Creates a model so that we can extract multiple entities. people: List[Person]\n\n**API Reference:**[ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)\n\nDefine reference examples[​](#define-reference-examples \"Direct link to Define reference examples\")\n---------------------------------------------------------------------------------------------------\n\nExamples can be defined as a list of input-output pairs.\n\nEach example contains an example `input` text and an example `output` showing what should be extracted from the text.\n\ninfo\n\nThis is a bit in the weeds, so feel free to skip.\n\nThe format of the example needs to match the API used (e.g., tool calling or JSON mode etc.).\n\nHere, the formatted examples will match the format expected for the tool calling API since that's what we're using.\n\n import uuidfrom typing import Dict, List, TypedDictfrom langchain_core.messages import ( AIMessage, BaseMessage, HumanMessage, SystemMessage, ToolMessage,)from langchain_core.pydantic_v1 import BaseModel, Fieldclass Example(TypedDict): \"\"\"A representation of an example consisting of text input and expected tool calls. For extraction, the tool calls are represented as instances of pydantic model. \"\"\" input: str # This is the example text tool_calls: List[BaseModel] # Instances of pydantic model that should be extracteddef tool_example_to_messages(example: Example) -> List[BaseMessage]: \"\"\"Convert an example into a list of messages that can be fed into an LLM. This code is an adapter that converts our example to a list of messages that can be fed into a chat model. The list of messages per example corresponds to: 1) HumanMessage: contains the content from which content should be extracted. 2) AIMessage: contains the extracted information from the model 3) ToolMessage: contains confirmation to the model that the model requested a tool correctly. The ToolMessage is required because some of the chat models are hyper-optimized for agents rather than for an extraction use case. 
\"\"\" messages: List[BaseMessage] = [HumanMessage(content=example[\"input\"])] tool_calls = [] for tool_call in example[\"tool_calls\"]: tool_calls.append( { \"id\": str(uuid.uuid4()), \"args\": tool_call.dict(), # The name of the function right now corresponds # to the name of the pydantic model # This is implicit in the API right now, # and will be improved over time. \"name\": tool_call.__class__.__name__, }, ) messages.append(AIMessage(content=\"\", tool_calls=tool_calls)) tool_outputs = example.get(\"tool_outputs\") or [ \"You have correctly called this tool.\" ] * len(tool_calls) for output, tool_call in zip(tool_outputs, tool_calls): messages.append(ToolMessage(content=output, tool_call_id=tool_call[\"id\"])) return messages\n\n**API Reference:**[AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) | [BaseMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.base.BaseMessage.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [SystemMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html) | [ToolMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessage.html)\n\nNext let's define our examples and then convert them into message format.\n\n examples = [ ( \"The ocean is vast and blue. It's more than 20,000 feet deep. There are many fish in it.\", Person(name=None, height_in_meters=None, hair_color=None), ), ( \"Fiona traveled far from France to Spain.\", Person(name=\"Fiona\", height_in_meters=None, hair_color=None), ),]messages = []for text, tool_call in examples: messages.extend( tool_example_to_messages({\"input\": text, \"tool_calls\": [tool_call]}) )\n\nLet's test out the prompt\n\n example_prompt = prompt.invoke({\"text\": \"this is some text\", \"examples\": messages})for message in example_prompt.messages: print(f\"{message.type}: {message}\")\n\n system: content=\"You are an expert extraction algorithm. Only extract relevant information from the text. If you do not know the value of an attribute asked to extract, return null for the attribute's value.\"human: content=\"The ocean is vast and blue. It's more than 20,000 feet deep. There are many fish in it.\"ai: content='' tool_calls=[{'name': 'Person', 'args': {'name': None, 'hair_color': None, 'height_in_meters': None}, 'id': 'b843ba77-4c9c-48ef-92a4-54e534f24521'}]tool: content='You have correctly called this tool.' tool_call_id='b843ba77-4c9c-48ef-92a4-54e534f24521'human: content='Fiona traveled far from France to Spain.'ai: content='' tool_calls=[{'name': 'Person', 'args': {'name': 'Fiona', 'hair_color': None, 'height_in_meters': None}, 'id': '46f00d6b-50e5-4482-9406-b07bb10340f6'}]tool: content='You have correctly called this tool.' tool_call_id='46f00d6b-50e5-4482-9406-b07bb10340f6'human: content='this is some text'\n\nCreate an extractor[​](#create-an-extractor \"Direct link to Create an extractor\")\n---------------------------------------------------------------------------------\n\nLet's select an LLM. Because we are using tool-calling, we will need a model that supports a tool-calling feature. 
See [this table](/v0.2/docs/integrations/chat/) for available LLMs.\n\n* OpenAI\n* Anthropic\n* Azure\n* Google\n* Cohere\n* FireworksAI\n* Groq\n* MistralAI\n* TogetherAI\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"OPENAI_API_KEY\"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI(model=\"gpt-4-0125-preview\", temperature=0)\n\n pip install -qU langchain-anthropic\n\n import getpassimport osos.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()from langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass()from langchain_openai import AzureChatOpenAIllm = AzureChatOpenAI( azure_endpoint=os.environ[\"AZURE_OPENAI_ENDPOINT\"], azure_deployment=os.environ[\"AZURE_OPENAI_DEPLOYMENT_NAME\"], openai_api_version=os.environ[\"AZURE_OPENAI_API_VERSION\"],)\n\n pip install -qU langchain-google-vertexai\n\n import getpassimport osos.environ[\"GOOGLE_API_KEY\"] = getpass.getpass()from langchain_google_vertexai import ChatVertexAIllm = ChatVertexAI(model=\"gemini-pro\")\n\n pip install -qU langchain-cohere\n\n import getpassimport osos.environ[\"COHERE_API_KEY\"] = getpass.getpass()from langchain_cohere import ChatCoherellm = ChatCohere(model=\"command-r\")\n\n pip install -qU langchain-fireworks\n\n import getpassimport osos.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass()from langchain_fireworks import ChatFireworksllm = ChatFireworks(model=\"accounts/fireworks/models/mixtral-8x7b-instruct\")\n\n pip install -qU langchain-groq\n\n import getpassimport osos.environ[\"GROQ_API_KEY\"] = getpass.getpass()from langchain_groq import ChatGroqllm = ChatGroq(model=\"llama3-8b-8192\")\n\n pip install -qU langchain-mistralai\n\n import getpassimport osos.environ[\"MISTRAL_API_KEY\"] = getpass.getpass()from langchain_mistralai import ChatMistralAIllm = ChatMistralAI(model=\"mistral-large-latest\")\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"TOGETHER_API_KEY\"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI( base_url=\"https://api.together.xyz/v1\", api_key=os.environ[\"TOGETHER_API_KEY\"], model=\"mistralai/Mixtral-8x7B-Instruct-v0.1\",)\n\nFollowing the [extraction tutorial](/v0.2/docs/tutorials/extraction/), we use the `.with_structured_output` method to structure model outputs according to the desired schema:\n\n runnable = prompt | llm.with_structured_output( schema=Data, method=\"function_calling\", include_raw=False,)\n\nWithout examples 😿[​](#without-examples- \"Direct link to Without examples 😿\")\n-------------------------------------------------------------------------------\n\nNotice that even capable models can fail with a **very simple** test case!\n\n for _ in range(5): text = \"The solar system is large, but earth has only 1 moon.\" print(runnable.invoke({\"text\": text, \"examples\": []}))\n\n people=[Person(name='earth', hair_color='null', height_in_meters='null')]people=[Person(name='earth', hair_color='null', height_in_meters='null')]people=[]people=[Person(name='earth', hair_color='null', height_in_meters='null')]people=[]\n\nWith examples 😻[​](#with-examples- \"Direct link to With examples 😻\")\n----------------------------------------------------------------------\n\nReference examples helps to fix the failure!\n\n for _ in range(5): text = \"The solar system is large, but earth has only 1 moon.\" 
print(runnable.invoke({\"text\": text, \"examples\": messages}))\n\n people=[]people=[]people=[]people=[]people=[]\n\nNote that we can see the few-shot examples as tool-calls in the [Langsmith trace](https://smith.langchain.com/public/4c436bc2-a1ce-440b-82f5-093947542e40/r).\n\nAnd we retain performance on a positive sample:\n\n runnable.invoke( { \"text\": \"My name is Harrison. My hair is black.\", \"examples\": messages, })\n\n Data(people=[Person(name='Harrison', hair_color='black', height_in_meters=None)])\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/extraction_examples.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to select examples by similarity\n\n](/v0.2/docs/how_to/example_selectors_similarity/)[\n\nNext\n\nHow to handle long text when doing extraction\n\n](/v0.2/docs/how_to/extraction_long_text/)\n\n* [Define the schema](#define-the-schema)\n* [Define reference examples](#define-reference-examples)\n* [Create an extractor](#create-an-extractor)\n* [Without examples 😿](#without-examples-)\n* [With examples 😻](#with-examples-)"},"last_modified":{"kind":"null"}}},{"rowIdx":1364,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/integrations/platforms/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* Providers\n\nOn this page\n\nProviders\n=========\n\ninfo\n\nIf you'd like to write your own integration, see [Extending LangChain](/v0.2/docs/how_to/#custom). If you'd like to contribute an integration, see [Contributing integrations](/v0.2/docs/contributing/integrations/).\n\nLangChain integrates with many providers.\n\nPartner Packages[​](#partner-packages \"Direct link to Partner Packages\")\n------------------------------------------------------------------------\n\nThese providers have standalone `langchain-{provider}` packages for improved versioning, dependency management and testing.\n\n* [AI21](/v0.2/docs/integrations/providers/ai21/)\n* [Airbyte](/v0.2/docs/integrations/providers/airbyte/)\n* [Amazon Web Services](/v0.2/docs/integrations/platforms/aws/)\n* [Anthropic](/v0.2/docs/integrations/platforms/anthropic/)\n* [Astra DB](/v0.2/docs/integrations/providers/astradb/)\n* [Cohere](/v0.2/docs/integrations/providers/cohere/)\n* [Couchbase](/v0.2/docs/integrations/providers/couchbase/)\n* [Elasticsearch](/v0.2/docs/integrations/providers/elasticsearch/)\n* [Exa Search](/v0.2/docs/integrations/providers/exa_search/)\n* [Fireworks](/v0.2/docs/integrations/providers/fireworks/)\n* [Google](/v0.2/docs/integrations/platforms/google/)\n* [Groq](/v0.2/docs/integrations/providers/groq/)\n* [IBM](/v0.2/docs/integrations/providers/ibm/)\n* [MistralAI](/v0.2/docs/integrations/providers/mistralai/)\n* [MongoDB](/v0.2/docs/integrations/providers/mongodb_atlas/)\n* [Nomic](/v0.2/docs/integrations/providers/nomic/)\n* [Nvidia](/v0.2/docs/integrations/providers/nvidia/)\n* [OpenAI](/v0.2/docs/integrations/platforms/openai/)\n* [Pinecone](/v0.2/docs/integrations/providers/pinecone/)\n* [Qdrant](/v0.2/docs/integrations/providers/qdrant/)\n* [Robocorp](/v0.2/docs/integrations/providers/robocorp/)\n* [Together AI](/v0.2/docs/integrations/providers/together/)\n* [Upstage](/v0.2/docs/integrations/providers/upstage/)\n* 
[Voyage AI](/v0.2/docs/integrations/providers/voyageai/)\n\nFeatured Community Providers[​](#featured-community-providers \"Direct link to Featured Community Providers\")\n------------------------------------------------------------------------------------------------------------\n\n* [Hugging Face](/v0.2/docs/integrations/platforms/huggingface/)\n* [Microsoft](/v0.2/docs/integrations/platforms/microsoft/)\n\nAll Providers[​](#all-providers \"Direct link to All Providers\")\n---------------------------------------------------------------\n\nClick [here](/v0.2/docs/integrations/providers/) to see all providers.\n\n* * *\n\n* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to use prompting alone (no tool calling) to do extraction\n\nHow to use prompting alone (no tool calling) to do extraction\n=============================================================\n\nTool calling features are not required for generating structured output from LLMs. LLMs that are able to follow prompt instructions well can be tasked with outputting information in a given format.\n\nThis approach relies on designing good prompts and then parsing the output of the LLMs to make them extract information well.\n\nTo extract data without tool-calling features:\n\n1. Instruct the LLM to generate text following an expected format (e.g., JSON with a certain schema);\n2.
Use [output parsers](/v0.2/docs/concepts/#output-parsers) to structure the model response into a desired Python object.\n\nFirst we select a LLM:\n\n* OpenAI\n* Anthropic\n* Azure\n* Google\n* Cohere\n* FireworksAI\n* Groq\n* MistralAI\n* TogetherAI\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"OPENAI_API_KEY\"] = getpass.getpass()from langchain_openai import ChatOpenAImodel = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n\n pip install -qU langchain-anthropic\n\n import getpassimport osos.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()from langchain_anthropic import ChatAnthropicmodel = ChatAnthropic(model=\"claude-3-sonnet-20240229\")\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass()from langchain_openai import AzureChatOpenAImodel = AzureChatOpenAI( azure_endpoint=os.environ[\"AZURE_OPENAI_ENDPOINT\"], azure_deployment=os.environ[\"AZURE_OPENAI_DEPLOYMENT_NAME\"], openai_api_version=os.environ[\"AZURE_OPENAI_API_VERSION\"],)\n\n pip install -qU langchain-google-vertexai\n\n import getpassimport osos.environ[\"GOOGLE_API_KEY\"] = getpass.getpass()from langchain_google_vertexai import ChatVertexAImodel = ChatVertexAI(model=\"gemini-pro\")\n\n pip install -qU langchain-cohere\n\n import getpassimport osos.environ[\"COHERE_API_KEY\"] = getpass.getpass()from langchain_cohere import ChatCoheremodel = ChatCohere(model=\"command-r\")\n\n pip install -qU langchain-fireworks\n\n import getpassimport osos.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass()from langchain_fireworks import ChatFireworksmodel = ChatFireworks(model=\"accounts/fireworks/models/mixtral-8x7b-instruct\")\n\n pip install -qU langchain-groq\n\n import getpassimport osos.environ[\"GROQ_API_KEY\"] = getpass.getpass()from langchain_groq import ChatGroqmodel = ChatGroq(model=\"llama3-8b-8192\")\n\n pip install -qU langchain-mistralai\n\n import getpassimport osos.environ[\"MISTRAL_API_KEY\"] = getpass.getpass()from langchain_mistralai import ChatMistralAImodel = ChatMistralAI(model=\"mistral-large-latest\")\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"TOGETHER_API_KEY\"] = getpass.getpass()from langchain_openai import ChatOpenAImodel = ChatOpenAI( base_url=\"https://api.together.xyz/v1\", api_key=os.environ[\"TOGETHER_API_KEY\"], model=\"mistralai/Mixtral-8x7B-Instruct-v0.1\",)\n\ntip\n\nThis tutorial is meant to be simple, but generally should really include reference examples to squeeze out performance!\n\nUsing PydanticOutputParser[​](#using-pydanticoutputparser \"Direct link to Using PydanticOutputParser\")\n------------------------------------------------------------------------------------------------------\n\nThe following example uses the built-in `PydanticOutputParser` to parse the output of a chat model.\n\n from typing import List, Optionalfrom langchain_core.output_parsers import PydanticOutputParserfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.pydantic_v1 import BaseModel, Field, validatorclass Person(BaseModel): \"\"\"Information about a person.\"\"\" name: str = Field(..., description=\"The name of the person\") height_in_meters: float = Field( ..., description=\"The height of the person expressed in meters.\" )class People(BaseModel): \"\"\"Identifying information about all people in a text.\"\"\" people: List[Person]# Set up a parserparser = PydanticOutputParser(pydantic_object=People)# Promptprompt = ChatPromptTemplate.from_messages( [ ( \"system\", 
\"Answer the user query. Wrap the output in `json` tags\\n{format_instructions}\", ), (\"human\", \"{query}\"), ]).partial(format_instructions=parser.get_format_instructions())\n\n**API Reference:**[PydanticOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.pydantic.PydanticOutputParser.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html)\n\nLet's take a look at what information is sent to the model\n\n query = \"Anna is 23 years old and she is 6 feet tall\"\n\n print(prompt.format_prompt(query=query).to_string())\n\n System: Answer the user query. Wrap the output in `json` tagsThe output should be formatted as a JSON instance that conforms to the JSON schema below.As an example, for the schema {\"properties\": {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}the object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.Here is the output schema:\n\n{\"description\": \"Identifying information about all people in a text.\", \"properties\": {\"people\": {\"title\": \"People\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/Person\"}}}, \"required\": \\[\"people\"\\], \"definitions\": {\"Person\": {\"title\": \"Person\", \"description\": \"Information about a person.\", \"type\": \"object\", \"properties\": {\"name\": {\"title\": \"Name\", \"description\": \"The name of the person\", \"type\": \"string\"}, \"height\\_in\\_meters\": {\"title\": \"Height In Meters\", \"description\": \"The height of the person expressed in meters.\", \"type\": \"number\"}}, \"required\": \\[\"name\", \"height\\_in\\_meters\"\\]}}}\n\n Human: Anna is 23 years old and she is 6 feet tall\n\nHaving defined our prompt, we simply chain together the prompt, model and output parser:\n\n chain = prompt | model | parserchain.invoke({\"query\": query})\n\n People(people=[Person(name='Anna', height_in_meters=1.83)])\n\nCheck out the associated [Langsmith trace](https://smith.langchain.com/public/92ed52a3-92b9-45af-a663-0a9c00e5e396/r).\n\nNote that the schema shows up in two places:\n\n1. In the prompt, via `parser.get_format_instructions()`;\n2. 
In the chain, to receive the formatted output and structure it into a Python object (in this case, the Pydantic object `People`).\n\nCustom Parsing[​](#custom-parsing \"Direct link to Custom Parsing\")\n------------------------------------------------------------------\n\nIf desired, it's easy to create a custom prompt and parser with `LangChain` and `LCEL`.\n\nTo create a custom parser, define a function to parse the output from the model (typically an [AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html)) into an object of your choice.\n\nSee below for a simple implementation of a JSON parser.\n\n import jsonimport refrom typing import List, Optionalfrom langchain_anthropic.chat_models import ChatAnthropicfrom langchain_core.messages import AIMessagefrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.pydantic_v1 import BaseModel, Field, validatorclass Person(BaseModel): \"\"\"Information about a person.\"\"\" name: str = Field(..., description=\"The name of the person\") height_in_meters: float = Field( ..., description=\"The height of the person expressed in meters.\" )class People(BaseModel): \"\"\"Identifying information about all people in a text.\"\"\" people: List[Person]# Promptprompt = ChatPromptTemplate.from_messages( [ ( \"system\", \"Answer the user query. Output your answer as JSON that \" \"matches the given schema: ```json\\n{schema}\\n```. \" \"Make sure to wrap the answer in ```json and ``` tags\", ), (\"human\", \"{query}\"), ]).partial(schema=People.schema())# Custom parserdef extract_json(message: AIMessage) -> List[dict]: \"\"\"Extracts JSON content from a string where JSON is embedded between ```json and ``` tags. Parameters: text (str): The text containing the JSON content. Returns: list: A list of extracted JSON strings. \"\"\" text = message.content # Define the regular expression pattern to match JSON blocks pattern = r\"```json(.*?)```\" # Find all non-overlapping matches of the pattern in the string matches = re.findall(pattern, text, re.DOTALL) # Return the list of matched JSON strings, stripping any leading or trailing whitespace try: return [json.loads(match.strip()) for match in matches] except Exception: raise ValueError(f\"Failed to parse: {message}\")\n\n**API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) | [AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html)\n\n query = \"Anna is 23 years old and she is 6 feet tall\"print(prompt.format_prompt(query=query).to_string())\n\n System: Answer the user query. Output your answer as JSON that matches the given schema: ```json{'title': 'People', 'description': 'Identifying information about all people in a text.', 'type': 'object', 'properties': {'people': {'title': 'People', 'type': 'array', 'items': {'$ref': '#/definitions/Person'}}}, 'required': ['people'], 'definitions': {'Person': {'title': 'Person', 'description': 'Information about a person.', 'type': 'object', 'properties': {'name': {'title': 'Name', 'description': 'The name of the person', 'type': 'string'}, 'height_in_meters': {'title': 'Height In Meters', 'description': 'The height of the person expressed in meters.', 'type': 'number'}}, 'required': ['name', 'height_in_meters']}}}```. 
Make sure to wrap the answer in ```json and ``` tagsHuman: Anna is 23 years old and she is 6 feet tall\n\n chain = prompt | model | extract_jsonchain.invoke({\"query\": query})\n\n [{'people': [{'name': 'Anna', 'height_in_meters': 1.83}]}]\n\nOther Libraries[​](#other-libraries \"Direct link to Other Libraries\")\n---------------------------------------------------------------------\n\nIf you're doing extraction with a parsing approach, check out the [Kor](https://eyurtsev.github.io/kor/) library. It's written by one of the `LangChain` maintainers and it helps to craft a prompt that takes examples into account, allows controlling formats (e.g., JSON or CSV) and expresses the schema in TypeScript. It seems to work pretty well!\n\n* * *\n\n* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* Hybrid Search\n\nHybrid Search\n=============\n\nThe standard search in LangChain is done by vector similarity. However, a number of vectorstore implementations (Astra DB, ElasticSearch, Neo4J, AzureSearch, ...) also support more advanced search that combines vector similarity search with other search techniques (full-text, BM25, and so on). This is generally referred to as \"Hybrid\" search.\n\n**Step 1: Make sure the vectorstore you are using supports hybrid search**\n\nAt the moment, there is no unified way to perform hybrid search in LangChain. Each vectorstore may have its own way to do it. This is generally exposed as a keyword argument that is passed in during `similarity_search`. By reading the documentation or source code, figure out whether the vectorstore you are using supports hybrid search and, if so, how to use it.\n\n**Step 2: Add that parameter as a configurable field for the chain**\n\nThis will let you easily call the chain and configure any relevant flags at runtime. See [this documentation](/v0.2/docs/how_to/configure/) for more information on configuration.\n\n**Step 3: Call the chain with that configurable field**\n\nNow, at runtime, you can call this chain with the configurable field.\n\nCode Example[​](#code-example \"Direct link to Code Example\")\n------------------------------------------------------------\n\nLet's see a concrete example of what this looks like in code.
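Schematically, steps 2 and 3 amount to exposing the retriever's `search_kwargs` as a configurable field and then choosing the hybrid-search flag at call time. The sketch below uses a hypothetical vector store and a made-up `"hybrid_flag"` keyword purely to show the shape of the pattern; the concrete Astra DB walkthrough that follows fills in a real implementation.

```python
from langchain_core.runnables import ConfigurableField

# Assume `vectorstore` is a LangChain vector store whose search accepts an
# extra, store-specific hybrid-search keyword argument.
retriever = vectorstore.as_retriever()

# Step 2: make the retriever's search kwargs configurable at runtime.
configurable_retriever = retriever.configurable_fields(
    search_kwargs=ConfigurableField(
        id="search_kwargs",
        name="Search Kwargs",
        description="Vector-store-specific search keyword arguments",
    )
)

# Step 3: pass the store-specific flag when invoking, instead of hard-coding it.
docs = configurable_retriever.invoke(
    "my query",
    config={"configurable": {"search_kwargs": {"k": 4, "hybrid_flag": "some value"}}},
)
```

The rest of this section works through a real instance of this pattern.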
We will use the Cassandra/CQL interface of Astra DB for this example.\n\nInstall the following Python package:\n\n !pip install \"cassio>=0.1.7\"\n\nGet the [connection secrets](https://docs.datastax.com/en/astra/astra-db-vector/get-started/quickstart.html).\n\nInitialize cassio:\n\n import cassiocassio.init( database_id=\"Your database ID\", token=\"Your application token\", keyspace=\"Your key space\",)\n\nCreate the Cassandra VectorStore with a standard [index analyzer](https://docs.datastax.com/en/astra/astra-db-vector/cql/use-analyzers-with-cql.html). The index analyzer is needed to enable term matching.\n\n from cassio.table.cql import STANDARD_ANALYZERfrom langchain_community.vectorstores import Cassandrafrom langchain_openai import OpenAIEmbeddingsembeddings = OpenAIEmbeddings()vectorstore = Cassandra( embedding=embeddings, table_name=\"test_hybrid\", body_index_options=[STANDARD_ANALYZER], session=None, keyspace=None,)vectorstore.add_texts( [ \"In 2023, I visited Paris\", \"In 2022, I visited New York\", \"In 2021, I visited New Orleans\", ])\n\n**API Reference:**[Cassandra](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.cassandra.Cassandra.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html)\n\nIf we do a standard similarity search, we get all the documents:\n\n vectorstore.as_retriever().invoke(\"What city did I visit last?\")\n\n [Document(page_content='In 2022, I visited New York'),Document(page_content='In 2023, I visited Paris'),Document(page_content='In 2021, I visited New Orleans')]\n\nThe Astra DB vectorstore `body_search` argument can be used to filter the search on the term `new`.\n\n vectorstore.as_retriever(search_kwargs={\"body_search\": \"new\"}).invoke( \"What city did I visit last?\")\n\n [Document(page_content='In 2022, I visited New York'),Document(page_content='In 2021, I visited New Orleans')]\n\nWe can now create the chain that we will use to do question-answering over\n\n from langchain_core.output_parsers import StrOutputParserfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.runnables import ( ConfigurableField, RunnablePassthrough,)from langchain_openai import ChatOpenAI\n\n**API Reference:**[StrOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.string.StrOutputParser.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [ConfigurableField](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.utils.ConfigurableField.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)\n\nThis is basic question-answering chain set up.\n\n template = \"\"\"Answer the question based only on the following context:{context}Question: {question}\"\"\"prompt = ChatPromptTemplate.from_template(template)model = ChatOpenAI()retriever = vectorstore.as_retriever()\n\nHere we mark the retriever as having a configurable field. All vectorstore retrievers have `search_kwargs` as a field. 
This is just a dictionary with vectorstore-specific fields.\n\n configurable_retriever = retriever.configurable_fields( search_kwargs=ConfigurableField( id=\"search_kwargs\", name=\"Search Kwargs\", description=\"The search kwargs to use\", ))\n\nWe can now create the chain using our configurable retriever:\n\n chain = ( {\"context\": configurable_retriever, \"question\": RunnablePassthrough()} | prompt | model | StrOutputParser())\n\n chain.invoke(\"What city did I visit last?\")\n\n Paris\n\nWe can now invoke the chain with configurable options. `search_kwargs` is the id of the configurable field. The value is the search kwargs to use for Astra DB.\n\n chain.invoke( \"What city did I visit last?\", config={\"configurable\": {\"search_kwargs\": {\"body_search\": \"new\"}}},)\n\n New York\n\n* * *\n\n* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to handle long text when doing extraction\n\nHow to handle long text when doing extraction\n=============================================\n\nWhen working with files, like PDFs, you're likely to encounter text that exceeds your language model's context window. To process this text, consider these strategies:\n\n1. **Change LLM** Choose a different LLM that supports a larger context window.\n2. **Brute Force** Chunk the document, and extract content from each chunk.\n3. **RAG** Chunk the document, index the chunks, and only extract content from a subset of chunks that look \"relevant\".\n\nKeep in mind that these strategies have different trade-offs, and the best strategy likely depends on the application that you're designing!\n\nThis guide demonstrates how to implement strategies 2 and 3.\n\nSet up[​](#set-up \"Direct link to Set up\")\n------------------------------------------\n\nWe need some example data!
Let's download an article about [cars from wikipedia](https://en.wikipedia.org/wiki/Car) and load it as a LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html).

```python
import re

import requests
from langchain_community.document_loaders import BSHTMLLoader

# Download the content
response = requests.get("https://en.wikipedia.org/wiki/Car")
# Write it to a file
with open("car.html", "w", encoding="utf-8") as f:
    f.write(response.text)
# Load it with an HTML parser
loader = BSHTMLLoader("car.html")
document = loader.load()[0]
# Clean up code
# Replace consecutive new lines with a single new line
document.page_content = re.sub("\n\n+", "\n", document.page_content)
```

**API Reference:** [BSHTMLLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.html_bs.BSHTMLLoader.html)

```python
print(len(document.page_content))
```

```
79174
```

Define the schema
-----------------

Following the [extraction tutorial](/v0.2/docs/tutorials/extraction/), we will use Pydantic to define the schema of information we wish to extract. In this case, we will extract a list of "key developments" (e.g., important historical events) that include a year and a description.

Note that we also include an `evidence` key and instruct the model to reproduce verbatim the relevant sentences of text from the article. This allows us to compare the extraction results to (the model's reconstruction of) text from the original document.

```python
from typing import List, Optional

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.pydantic_v1 import BaseModel, Field


class KeyDevelopment(BaseModel):
    """Information about a development in the history of cars."""

    year: int = Field(
        ..., description="The year when there was an important historic development."
    )
    description: str = Field(
        ..., description="What happened in this year? What was the development?"
    )
    evidence: str = Field(
        ...,
        description="Repeat in verbatim the sentence(s) from which the year and description information were extracted",
    )


class ExtractionData(BaseModel):
    """Extracted information about key developments in the history of cars."""

    key_developments: List[KeyDevelopment]


# Define a custom prompt to provide instructions and any additional context.
# 1) You can add examples into the prompt template to improve extraction quality
# 2) Introduce additional parameters to take context into account (e.g., include metadata
#    about the document from which the text was extracted.)
prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You are an expert at identifying key historic development in text. "
            "Only extract important historic developments. Extract nothing if no important information can be found in the text.",
        ),
        ("human", "{text}"),
    ]
)
```

**API Reference:** [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [MessagesPlaceholder](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.MessagesPlaceholder.html)

Create an extractor
-------------------

Let's select an LLM. Because we are using tool-calling, we will need a model that supports tool-calling. See [this table](/v0.2/docs/integrations/chat/) for available LLMs.

**OpenAI**

```python
# pip install -qU langchain-openai
import getpass
import os

os.environ["OPENAI_API_KEY"] = getpass.getpass()

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4-0125-preview", temperature=0)
```

**Anthropic**

```python
# pip install -qU langchain-anthropic
import getpass
import os

os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()

from langchain_anthropic import ChatAnthropic

llm = ChatAnthropic(model="claude-3-sonnet-20240229")
```

**Azure**

```python
# pip install -qU langchain-openai
import getpass
import os

os.environ["AZURE_OPENAI_API_KEY"] = getpass.getpass()

from langchain_openai import AzureChatOpenAI

llm = AzureChatOpenAI(
    azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
    azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"],
    openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],
)
```

**Google**

```python
# pip install -qU langchain-google-vertexai
import getpass
import os

os.environ["GOOGLE_API_KEY"] = getpass.getpass()

from langchain_google_vertexai import ChatVertexAI

llm = ChatVertexAI(model="gemini-pro")
```

**Cohere**

```python
# pip install -qU langchain-cohere
import getpass
import os

os.environ["COHERE_API_KEY"] = getpass.getpass()

from langchain_cohere import ChatCohere

llm = ChatCohere(model="command-r")
```

**FireworksAI**

```python
# pip install -qU langchain-fireworks
import getpass
import os

os.environ["FIREWORKS_API_KEY"] = getpass.getpass()

from langchain_fireworks import ChatFireworks

llm = ChatFireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
```

**Groq**

```python
# pip install -qU langchain-groq
import getpass
import os

os.environ["GROQ_API_KEY"] = getpass.getpass()

from langchain_groq import ChatGroq

llm = ChatGroq(model="llama3-8b-8192")
```

**MistralAI**

```python
# pip install -qU langchain-mistralai
import getpass
import os

os.environ["MISTRAL_API_KEY"] = getpass.getpass()

from langchain_mistralai import ChatMistralAI

llm = ChatMistralAI(model="mistral-large-latest")
```

**TogetherAI**

```python
# pip install -qU langchain-openai
import getpass
import os

os.environ["TOGETHER_API_KEY"] = getpass.getpass()

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    base_url="https://api.together.xyz/v1",
    api_key=os.environ["TOGETHER_API_KEY"],
    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
)
```

```python
extractor = prompt | llm.with_structured_output(
    schema=ExtractionData,
    include_raw=False,
)
```

Brute force approach
--------------------

Split the documents into chunks such that each chunk fits into the context window of the LLMs.

```python
from langchain_text_splitters import TokenTextSplitter

text_splitter = TokenTextSplitter(
    # Controls the size of each chunk
    chunk_size=2000,
    # Controls overlap between chunks
    chunk_overlap=20,
)
texts = text_splitter.split_text(document.page_content)
```

**API Reference:** [TokenTextSplitter](https://api.python.langchain.com/en/latest/base/langchain_text_splitters.base.TokenTextSplitter.html)

Use [batch](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) functionality to run the extraction in **parallel** across each chunk!

**tip**

You can often use `.batch()` to parallelize the extractions! `.batch` uses a threadpool under the hood to help you parallelize workloads.

If your model is exposed via an API, this will likely speed up your extraction flow!

```python
# Limit just to the first 3 chunks
# so the code can be re-run quickly
first_few = texts[:3]

extractions = extractor.batch(
    [{"text": text} for text in first_few],
    {"max_concurrency": 5},  # limit the concurrency by passing max concurrency!
)
```

### Merge results

After extracting data from across the chunks, we'll want to merge the extractions together.

```python
key_developments = []

for extraction in extractions:
    key_developments.extend(extraction.key_developments)

key_developments[:10]
```

```
[KeyDevelopment(year=1966, description='The Toyota Corolla began production, becoming the best-selling series of automobile in history.', evidence='The Toyota Corolla, which has been in production since 1966, is the best-selling series of automobile in history.'),
 KeyDevelopment(year=1769, description='Nicolas-Joseph Cugnot built the first steam-powered road vehicle.', evidence='The French inventor Nicolas-Joseph Cugnot built the first steam-powered road vehicle in 1769.'),
 KeyDevelopment(year=1808, description='François Isaac de Rivaz designed and constructed the first internal combustion-powered automobile.', evidence='the Swiss inventor François Isaac de Rivaz designed and constructed the first internal combustion-powered automobile in 1808.'),
 KeyDevelopment(year=1886, description='Carl Benz patented his Benz Patent-Motorwagen, inventing the modern car.', evidence='The modern car—a practical, marketable automobile for everyday use—was invented in 1886, when the German inventor Carl Benz patented his Benz Patent-Motorwagen.'),
 KeyDevelopment(year=1908, description='Ford Model T, one of the first cars affordable by the masses, began production.', evidence='One of the first cars affordable by the masses was the Ford Model T, begun in 1908, an American car manufactured by the Ford Motor Company.'),
 KeyDevelopment(year=1888, description="Bertha Benz undertook the first road trip by car to prove the road-worthiness of her husband's invention.", evidence="In August 1888, Bertha Benz, the wife of Carl Benz, undertook the first road trip by car, to prove the road-worthiness of her husband's invention."),
 KeyDevelopment(year=1896, description='Benz designed and patented the first internal-combustion flat engine, called boxermotor.', evidence='In 1896, Benz designed and patented the first internal-combustion flat engine, called boxermotor.'),
 KeyDevelopment(year=1897, description='Nesselsdorfer Wagenbau produced the Präsident automobil, one of the first factory-made cars in the world.', evidence='The first motor car in central Europe and one of the first factory-made cars in the world, was produced by Czech company Nesselsdorfer Wagenbau (later renamed to Tatra) in 1897, the Präsident automobil.'),
 KeyDevelopment(year=1890, description='Daimler Motoren Gesellschaft (DMG) was founded by Daimler and Maybach in Cannstatt.', evidence='Daimler and Maybach founded Daimler Motoren Gesellschaft (DMG) in Cannstatt in 1890.'),
 KeyDevelopment(year=1891, description='Auguste Doriot and Louis Rigoulot completed the longest trip by a petrol-driven vehicle with a Daimler powered Peugeot Type 3.', evidence='In 1891, Auguste Doriot and his Peugeot colleague Louis Rigoulot completed the longest trip by a petrol-driven vehicle when their self-designed and built Daimler powered Peugeot Type 3 completed 2,100 kilometres (1,300 mi) from Valentigney to Paris and Brest and back again.')]
```

RAG based approach
------------------

Another simple idea is to chunk up the text, but instead of extracting information from every chunk, just focus on the most relevant chunks.

**caution**

It can be difficult to identify which chunks are relevant.

For example, in the `car` article we're using here, most of the article contains key development information. So by using **RAG**, we'll likely be throwing out a lot of relevant information.

We suggest experimenting with your use case and determining whether this approach works or not.

To implement the RAG based approach:

1. Chunk up your document(s) and index them (e.g., in a vectorstore);
2. Prepend the `extractor` chain with a retrieval step using the vectorstore.

Here's a simple example that relies on the `FAISS` vectorstore.

```python
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_core.runnables import RunnableLambda
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter

texts = text_splitter.split_text(document.page_content)
vectorstore = FAISS.from_texts(texts, embedding=OpenAIEmbeddings())
retriever = vectorstore.as_retriever(
    search_kwargs={"k": 1}
)  # Only extract from first document
```

**API Reference:** [FAISS](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html) | [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) | [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) | [CharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.CharacterTextSplitter.html)

In this case the RAG extractor is only looking at the top document.

```python
rag_extractor = {
    "text": retriever | (lambda docs: docs[0].page_content)  # fetch content of top doc
} | extractor
```

```python
results = rag_extractor.invoke("Key developments associated with cars")
```

```python
for key_development in results.key_developments:
    print(key_development)
```

```
year=1869 description='Mary Ward became one of the first documented car fatalities in Parsonstown, Ireland.' evidence='Mary Ward became one of the first documented car fatalities in 1869 in Parsonstown, Ireland,'
year=1899 description="Henry Bliss one of the US's first pedestrian car casualties in New York City." evidence="Henry Bliss one of the US's first pedestrian car casualties in 1899 in New York City."
year=2030 description='All fossil fuel vehicles will be banned in Amsterdam.' evidence='all fossil fuel vehicles will be banned in Amsterdam from 2030.'
```

Common issues
-------------

Different methods have their own pros and cons related to cost, speed, and accuracy.

Watch out for these issues:

* Chunking content means that the LLM can fail to extract information if the information is spread across multiple chunks.
* Large chunk overlap may cause the same information to be extracted twice, so be prepared to de-duplicate (a rough sketch follows this list).
* LLMs can make up data. If you are looking for a single fact across a large text and using a brute force approach, you may end up getting more made-up data.
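To make the de-duplication point concrete, here is a minimal sketch of one way to collapse duplicate extractions from the merged `key_developments` list above. Keying on the year plus a normalized description is an assumption for illustration, not something prescribed by this guide; a fuzzier comparison may suit your data better.

```python
# Minimal de-duplication sketch (assumes duplicates share a year and description text).
seen = set()
deduplicated = []
for dev in key_developments:
    key = (dev.year, dev.description.strip().lower())
    if key not in seen:
        seen.add(key)
        deduplicated.append(dev)

print(f"{len(key_developments)} extractions -> {len(deduplicated)} after de-duplication")
```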
* * *

How to inspect runnables
========================

**Prerequisites**

This guide assumes familiarity with the following concepts:

* [LangChain Expression Language (LCEL)](/v0.2/docs/concepts/#langchain-expression-language)
* [Chaining runnables](/v0.2/docs/how_to/sequence/)

Once you create a runnable with [LangChain Expression Language](/v0.2/docs/concepts/#langchain-expression-language), you may often want to inspect it to get a better sense for what is going on. This notebook covers some methods for doing so.

This guide shows some ways you can programmatically introspect the internal steps of chains. If you are instead interested in debugging issues in your chain, see [this section](/v0.2/docs/how_to/debugging/) instead.

First, let's create an example chain. We will create one that does retrieval:

```
%pip install -qU langchain langchain-openai faiss-cpu tiktoken
```

```python
from langchain_community.vectorstores import FAISS
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

vectorstore = FAISS.from_texts(
    ["harrison worked at kensho"], embedding=OpenAIEmbeddings()
)
retriever = vectorstore.as_retriever()

template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

model = ChatOpenAI()

chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)
```

**API Reference:** [FAISS](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html) | [StrOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.string.StrOutputParser.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html)

Get a graph
-----------

You can use the `get_graph()` method to get a graph representation of the runnable:

```python
chain.get_graph()
```

Print a graph
-------------

While that is not super legible, you can use the `print_ascii()` method to show that graph in a way that's easier to understand:

```python
chain.get_graph().print_ascii()
```

```
            +---------------------------------+
            |          ParallelInput          |
            +---------------------------------+
                     **               **
                  ***                   ***
                **                         **
 +----------------------+           +-------------+
 | VectorStoreRetriever |           | Passthrough |
 +----------------------+           +-------------+
                     **               **
                       ***         ***
                          **     **
            +----------------------------------+
            |          ParallelOutput          |
            +----------------------------------+
                              *
                              *
                   +--------------------+
                   | ChatPromptTemplate |
                   +--------------------+
                              *
                              *
                       +------------+
                       | ChatOpenAI |
                       +------------+
                              *
                              *
                    +-----------------+
                    | StrOutputParser |
                    +-----------------+
                              *
                              *
                 +-----------------------+
                 | StrOutputParserOutput |
                 +-----------------------+
```

Get the prompts
---------------

You may want to see just the prompts that are used in a chain with the `get_prompts()` method:

```python
chain.get_prompts()
```

```
[ChatPromptTemplate(input_variables=['context', 'question'], messages=[HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['context', 'question'], template='Answer the question based only on the following context:\n{context}\n\nQuestion: {question}\n'))])]
```

Next steps
----------

You've now learned how to introspect your composed LCEL chains.

Next, check out the other how-to guides on runnables in this section, or the related how-to guide on [debugging your chains](/v0.2/docs/how_to/debugging/).

* * *

How to add fallbacks to a runnable
==================================

When working with language models, you may often encounter issues from the underlying APIs, whether these be rate limiting or downtime. Therefore, as you move your LLM applications into production it becomes more and more important to safeguard against these. That's why we've introduced the concept of fallbacks.

A **fallback** is an alternative plan that may be used in an emergency.

Crucially, fallbacks can be applied not only on the LLM level but on the whole runnable level. This is important because different models often require different prompts. So if your call to OpenAI fails, you don't just want to send the same prompt to Anthropic - you probably want to use a different prompt template and send a different version there.

Fallback for LLM API Errors
---------------------------

This is maybe the most common use case for fallbacks. A request to an LLM API can fail for a variety of reasons - the API could be down, you could have hit rate limits, any number of things. Therefore, using fallbacks can help protect against these types of failures.

IMPORTANT: By default, a lot of the LLM wrappers catch errors and retry. You will most likely want to turn those off when working with fallbacks. Otherwise the first wrapper will keep on retrying rather than failing.

```
%pip install --upgrade --quiet langchain langchain-openai
```

```python
from langchain_anthropic import ChatAnthropic
from langchain_openai import ChatOpenAI
```

**API Reference:** [ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)

First, let's mock out what happens if we hit a RateLimitError from OpenAI:

```python
from unittest.mock import patch

import httpx
from openai import RateLimitError

request = httpx.Request("GET", "/")
response = httpx.Response(200, request=request)
error = RateLimitError("rate limit", response=response, body="")
```

```python
# Note that we set max_retries = 0 to avoid retrying on RateLimits, etc
openai_llm = ChatOpenAI(model="gpt-3.5-turbo-0125", max_retries=0)
anthropic_llm = ChatAnthropic(model="claude-3-haiku-20240307")
llm = openai_llm.with_fallbacks([anthropic_llm])
```

```python
# Let's use just the OpenAI LLM first, to show that we run into an error
with patch("openai.resources.chat.completions.Completions.create", side_effect=error):
    try:
        print(openai_llm.invoke("Why did the chicken cross the road?"))
    except RateLimitError:
        print("Hit error")
```

```
Hit error
```

```python
# Now let's try with fallbacks to Anthropic
with patch("openai.resources.chat.completions.Completions.create", side_effect=error):
    try:
        print(llm.invoke("Why did the chicken cross the road?"))
    except RateLimitError:
        print("Hit error")
```

```
content=' I don\'t actually know why the chicken crossed the road, but here are some possible humorous answers:\n\n- To get to the other side!\n\n- It was too chicken to just stand there. \n\n- It wanted a change of scenery.\n\n- It wanted to show the possum it could be done.\n\n- It was on its way to a poultry farmers\' convention.\n\nThe joke plays on the double meaning of "the other side" - literally crossing the road to the other side, or the "other side" meaning the afterlife. So it\'s an anti-joke, with a silly or unexpected pun as the answer.' additional_kwargs={} example=False
```

We can use our "LLM with Fallbacks" as we would a normal LLM.

```python
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You're a nice assistant who always includes a compliment in your response",
        ),
        ("human", "Why did the {animal} cross the road"),
    ]
)
chain = prompt | llm
with patch("openai.resources.chat.completions.Completions.create", side_effect=error):
    try:
        print(chain.invoke({"animal": "kangaroo"}))
    except RateLimitError:
        print("Hit error")
```

**API Reference:** [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html)

```
content=" I don't actually know why the kangaroo crossed the road, but I can take a guess! Here are some possible reasons:\n\n- To get to the other side (the classic joke answer!)\n\n- It was trying to find some food or water \n\n- It was trying to find a mate during mating season\n\n- It was fleeing from a predator or perceived threat\n\n- It was disoriented and crossed accidentally \n\n- It was following a herd of other kangaroos who were crossing\n\n- It wanted a change of scenery or environment \n\n- It was trying to reach a new habitat or territory\n\nThe real reason is unknown without more context, but hopefully one of those potential explanations does the joke justice! Let me know if you have any other animal jokes I can try to decipher." additional_kwargs={} example=False
```

Fallback for Sequences
----------------------

We can also create fallbacks for sequences, that are sequences themselves. Here we do that with two different models: ChatOpenAI and then normal OpenAI (which does not use a chat model). Because OpenAI is NOT a chat model, you likely want a different prompt.

```python
# First let's create a chain with a ChatModel
# We add in a string output parser here so the outputs between the two are the same type
from langchain_core.output_parsers import StrOutputParser

chat_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You're a nice assistant who always includes a compliment in your response",
        ),
        ("human", "Why did the {animal} cross the road"),
    ]
)
# Here we're going to use a bad model name to easily create a chain that will error
chat_model = ChatOpenAI(model="gpt-fake")
bad_chain = chat_prompt | chat_model | StrOutputParser()
```

**API Reference:** [StrOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.string.StrOutputParser.html)

```python
# Now lets create a chain with the normal OpenAI model
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI

prompt_template = """Instructions: You should always include a compliment in your response.

Question: Why did the {animal} cross the road?"""
prompt = PromptTemplate.from_template(prompt_template)
llm = OpenAI()
good_chain = prompt | llm
```

**API Reference:** [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) | [OpenAI](https://api.python.langchain.com/en/latest/llms/langchain_openai.llms.base.OpenAI.html)

```python
# We can now create a final chain which combines the two
chain = bad_chain.with_fallbacks([good_chain])
chain.invoke({"animal": "turtle"})
```

```
'\n\nAnswer: The turtle crossed the road to get to the other side, and I have to say he had some impressive determination.'
```

Fallback for Long Inputs
------------------------

One of the big limiting factors of LLMs is their context window. Usually, you can count and track the length of prompts before sending them to an LLM, but in situations where that is hard or complicated, you can fall back to a model with a longer context length.
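As an aside, here is a minimal sketch of the "count first" option mentioned above, using `tiktoken` to measure a prompt before choosing a model. The encoding name and the 4097-token budget are illustrative assumptions, not values prescribed by this guide.

```python
# Hypothetical pre-flight length check (sketch only; pick the encoding and budget
# that match the models you actually use).
import tiktoken

encoding = tiktoken.get_encoding("cl100k_base")


def pick_model(prompt_text: str, budget: int = 4097) -> str:
    # Route to the larger-context model only when the prompt exceeds the budget.
    n_tokens = len(encoding.encode(prompt_text))
    return "gpt-3.5-turbo-16k" if n_tokens > budget else "gpt-3.5-turbo"


print(pick_model("What is the next number: " + ", ".join(["one", "two"] * 3000)))
```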
```python
short_llm = ChatOpenAI()
long_llm = ChatOpenAI(model="gpt-3.5-turbo-16k")
llm = short_llm.with_fallbacks([long_llm])
```

```python
inputs = "What is the next number: " + ", ".join(["one", "two"] * 3000)
```

```python
try:
    print(short_llm.invoke(inputs))
except Exception as e:
    print(e)
```

```
This model's maximum context length is 4097 tokens. However, your messages resulted in 12012 tokens. Please reduce the length of the messages.
```

```python
try:
    print(llm.invoke(inputs))
except Exception as e:
    print(e)
```

```
content='The next number in the sequence is two.' additional_kwargs={} example=False
```

Fallback to Better Model
------------------------

Often we ask models to produce output in a specific format (like JSON). Models like GPT-3.5 can do this okay, but sometimes struggle. This naturally points to fallbacks - we can try with GPT-3.5 (faster, cheaper), but then if parsing fails we can use GPT-4.

```python
from langchain.output_parsers import DatetimeOutputParser
```

**API Reference:** [DatetimeOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.datetime.DatetimeOutputParser.html)

```python
prompt = ChatPromptTemplate.from_template(
    "what time was {event} (in %Y-%m-%dT%H:%M:%S.%fZ format - only return this value)"
)
```

```python
# In this case we are going to do the fallbacks on the LLM + output parser level
# Because the error will get raised in the OutputParser
openai_35 = ChatOpenAI() | DatetimeOutputParser()
openai_4 = ChatOpenAI(model="gpt-4") | DatetimeOutputParser()
```

```python
only_35 = prompt | openai_35
fallback_4 = prompt | openai_35.with_fallbacks([openai_4])
```

```python
try:
    print(only_35.invoke({"event": "the superbowl in 1994"}))
except Exception as e:
    print(f"Error: {e}")
```

```
Error: Could not parse datetime string: The Super Bowl in 1994 took place on January 30th at 3:30 PM local time. Converting this to the specified format (%Y-%m-%dT%H:%M:%S.%fZ) results in: 1994-01-30T15:30:00.000Z
```

```python
try:
    print(fallback_4.invoke({"event": "the superbowl in 1994"}))
except Exception as e:
    print(f"Error: {e}")
```

```
1994-01-30 15:30:00
```

* * *

How to filter messages
======================

In more complex chains and agents we might track state with a list of messages. This list can start to accumulate messages from multiple different models, speakers, sub-chains, etc., and we may only want to pass subsets of this full list of messages to each model call in the chain/agent.

The `filter_messages` utility makes it easy to filter messages by type, id, or name.

Basic usage
-----------

```python
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    filter_messages,
)

messages = [
    SystemMessage("you are a good assistant", id="1"),
    HumanMessage("example input", id="2", name="example_user"),
    AIMessage("example output", id="3", name="example_assistant"),
    HumanMessage("real input", id="4", name="bob"),
    AIMessage("real output", id="5", name="alice"),
]

filter_messages(messages, include_types="human")
```

**API Reference:** [AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [SystemMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html) | [filter_messages](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.filter_messages.html)

```
[HumanMessage(content='example input', name='example_user', id='2'),
 HumanMessage(content='real input', name='bob', id='4')]
```

```python
filter_messages(messages, exclude_names=["example_user", "example_assistant"])
```

```
[SystemMessage(content='you are a good assistant', id='1'),
 HumanMessage(content='real input', name='bob', id='4'),
 AIMessage(content='real output', name='alice', id='5')]
```

```python
filter_messages(messages, include_types=[HumanMessage, AIMessage], exclude_ids=["3"])
```

```
[HumanMessage(content='example input', name='example_user', id='2'),
 HumanMessage(content='real input', name='bob', id='4'),
 AIMessage(content='real output', name='alice', id='5')]
```

Chaining
--------

`filter_messages` can be used imperatively (like above) or declaratively, making it easy to compose with other components in a chain:

```python
# pip install -U langchain-anthropic
from langchain_anthropic import ChatAnthropic

llm = ChatAnthropic(model="claude-3-sonnet-20240229", temperature=0)
# Notice we don't pass in messages. This creates
# a RunnableLambda that takes messages as input
filter_ = filter_messages(exclude_names=["example_user", "example_assistant"])
chain = filter_ | llm
chain.invoke(messages)
```

**API Reference:** [ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html)

```
AIMessage(content=[], response_metadata={'id': 'msg_01Wz7gBHahAwkZ1KCBNtXmwA', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 3}}, id='run-b5d8a3fe-004f-4502-a071-a6c025031827-0', usage_metadata={'input_tokens': 16, 'output_tokens': 3, 'total_tokens': 19})
```

Looking at the LangSmith trace we can see that before the messages are passed to the model they are filtered: [https://smith.langchain.com/public/f808a724-e072-438e-9991-657cc9e7e253/r](https://smith.langchain.com/public/f808a724-e072-438e-9991-657cc9e7e253/r)

Looking at just the `filter_`, we can see that it's a Runnable object that can be invoked like all Runnables:

```python
filter_.invoke(messages)
```

```
[HumanMessage(content='real input', name='bob', id='4'),
 AIMessage(content='real output', name='alice', id='5')]
```

API reference
-------------

For a complete description of all arguments head to the API reference: [https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.filter_messages.html](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.filter_messages.html)

* * *

How to use the LangChain indexing API
=====================================

Here, we will look at a basic indexing workflow using the LangChain indexing API.

The indexing API lets you load and keep in sync documents from any source into a vector store. Specifically, it helps:

* Avoid writing duplicated content into the vector store
* Avoid re-writing unchanged content
* Avoid re-computing embeddings over unchanged content

All of which should save you time and money, as well as improve your vector search results.

Crucially, the indexing API will work even with documents that have gone through several transformation steps (e.g., via text chunking) with respect to the original source documents.

How it works
------------

LangChain indexing makes use of a record manager (`RecordManager`) that keeps track of document writes into the vector store.

When indexing content, hashes are computed for each document, and the following information is stored in the record manager (a rough illustration follows this list):

* the document hash (hash of both page content and metadata)
* write time
* the source id -- each document should include information in its metadata to allow us to determine the ultimate source of this document
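To make that bookkeeping concrete, here is an illustrative sketch of the kind of per-document record described above. This is an assumption-level illustration only, not the actual `RecordManager` implementation or its schema.

```python
# Illustrative only: approximate shape of the per-document record described above.
import hashlib
import json
import time


def make_record(page_content: str, metadata: dict, source_id_key: str = "source") -> dict:
    # The hash covers both page content and metadata, so any mutation changes it.
    payload = json.dumps({"page_content": page_content, "metadata": metadata}, sort_keys=True)
    return {
        "hash": hashlib.sha256(payload.encode("utf-8")).hexdigest(),
        "write_time": time.time(),
        "source_id": metadata.get(source_id_key),
    }


print(make_record("kitty", {"source": "kitty.txt"}))
```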
Deletion modes
--------------

When indexing documents into a vector store, it's possible that some existing documents in the vector store should be deleted. In certain situations you may want to remove any existing documents that are derived from the same sources as the new documents being indexed. In others you may want to delete all existing documents wholesale. The indexing API deletion modes let you pick the behavior you want:

| Cleanup Mode | De-Duplicates Content | Parallelizable | Cleans Up Deleted Source Docs | Cleans Up Mutations of Source Docs and/or Derived Docs | Clean Up Timing |
| --- | --- | --- | --- | --- | --- |
| None | ✅ | ✅ | ❌ | ❌ | - |
| Incremental | ✅ | ✅ | ❌ | ✅ | Continuously |
| Full | ✅ | ❌ | ✅ | ✅ | At end of indexing |

`None` does not do any automatic clean up, allowing the user to manually do clean up of old content.

`incremental` and `full` offer the following automated clean up:

* If the content of the source document or derived documents has **changed**, both `incremental` or `full` modes will clean up (delete) previous versions of the content.
* If the source document has been **deleted** (meaning it is not included in the documents currently being indexed), the `full` cleanup mode will delete it from the vector store correctly, but the `incremental` mode will not.

When content is mutated (e.g., the source PDF file was revised) there will be a period of time during indexing when both the new and old versions may be returned to the user. This happens after the new content was written, but before the old version was deleted.

* `incremental` indexing minimizes this period of time as it is able to do clean up continuously, as it writes.
* `full` mode does the clean up after all batches have been written.

Requirements
------------

1. Do not use with a store that has been pre-populated with content independently of the indexing API, as the record manager will not know that records have been inserted previously.
2. Only works with LangChain `vectorstore`s that support:
   * document addition by id (`add_documents` method with `ids` argument)
   * delete by id (`delete` method with `ids` argument)

Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `AzureCosmosDBNoSqlVectorSearch`, `AzureCosmosDBVectorSearch`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`.

Caution
-------

The record manager relies on a time-based mechanism to determine what content can be cleaned up (when using `full` or `incremental` cleanup modes).

If two tasks run back-to-back, and the first task finishes before the clock time changes, then the second task may not be able to clean up content.

This is unlikely to be an issue in actual settings for the following reasons:

1. The RecordManager uses higher resolution timestamps.
2. The data would need to change between the first and the second task runs, which becomes unlikely if the time interval between the tasks is small.
3. Indexing tasks typically take more than a few ms.

Quickstart
----------

```python
from langchain.indexes import SQLRecordManager, index
from langchain_core.documents import Document
from langchain_elasticsearch import ElasticsearchStore
from langchain_openai import OpenAIEmbeddings
```

**API Reference:** [SQLRecordManager](https://api.python.langchain.com/en/latest/indexes/langchain.indexes._sql_record_manager.SQLRecordManager.html) | [index](https://api.python.langchain.com/en/latest/indexing/langchain_core.indexing.api.index.html) | [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) | [ElasticsearchStore](https://api.python.langchain.com/en/latest/vectorstores/langchain_elasticsearch.vectorstores.ElasticsearchStore.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html)

Initialize a vector store and set up the embeddings:

```python
collection_name = "test_index"

embedding = OpenAIEmbeddings()

vectorstore = ElasticsearchStore(
    es_url="http://localhost:9200", index_name="test_index", embedding=embedding
)
```

Initialize a record manager with an appropriate namespace.

**Suggestion:** Use a namespace that takes into account both the vector store and the collection name in the vector store; e.g., 'redis/my_docs', 'chromadb/my_docs' or 'postgres/my_docs'.

```python
namespace = f"elasticsearch/{collection_name}"
record_manager = SQLRecordManager(
    namespace, db_url="sqlite:///record_manager_cache.sql"
)
```

Create a schema before using the record manager.

```python
record_manager.create_schema()
```

Let's index some test documents:

```python
doc1 = Document(page_content="kitty", metadata={"source": "kitty.txt"})
doc2 = Document(page_content="doggy", metadata={"source": "doggy.txt"})
```

Indexing into an empty vector store:

```python
def _clear():
    """Hacky helper method to clear content. See the `full` mode section to understand why it works."""
    index([], record_manager, vectorstore, cleanup="full", source_id_key="source")
```

### `None` deletion mode

This mode does not do automatic clean up of old versions of content; however, it still takes care of content de-duplication.

```python
_clear()
```

```python
index(
    [doc1, doc1, doc1, doc1, doc1],
    record_manager,
    vectorstore,
    cleanup=None,
    source_id_key="source",
)
```

```
{'num_added': 1, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 0}
```

```python
_clear()
```

```python
index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source")
```

```
{'num_added': 2, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 0}
```

Second time around all content will be skipped:

```python
index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source")
```

```
{'num_added': 0, 'num_updated': 0, 'num_skipped': 2, 'num_deleted': 0}
```

### `"incremental"` deletion mode

```python
_clear()
```

```python
index(
    [doc1, doc2],
    record_manager,
    vectorstore,
    cleanup="incremental",
    source_id_key="source",
)
```

```
{'num_added': 2, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 0}
```

Indexing again should result in both documents getting **skipped** -- also skipping the embedding operation!

```python
index(
    [doc1, doc2],
    record_manager,
    vectorstore,
    cleanup="incremental",
    source_id_key="source",
)
```

```
{'num_added': 0, 'num_updated': 0, 'num_skipped': 2, 'num_deleted': 0}
```

If we provide no documents with incremental indexing mode, nothing will change.

```python
index([], record_manager, vectorstore, cleanup="incremental", source_id_key="source")
```

```
{'num_added': 0, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 0}
```

If we mutate a document, the new version will be written and all old versions sharing the same source will be deleted.

```python
changed_doc_2 = Document(page_content="puppy", metadata={"source": "doggy.txt"})
```

```python
index(
    [changed_doc_2],
    record_manager,
    vectorstore,
    cleanup="incremental",
    source_id_key="source",
)
```

```
{'num_added': 1, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 1}
```

### `"full"` deletion mode

In `full` mode the user should pass the `full` universe of content that should be indexed into the indexing function.

Any documents that are not passed into the indexing function and are present in the vectorstore will be deleted!

This behavior is useful to handle deletions of source documents.

```python
_clear()
```

```python
all_docs = [doc1, doc2]
```

```python
index(all_docs, record_manager, vectorstore, cleanup="full", source_id_key="source")
```

```
{'num_added': 2, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 0}
```

Say someone deleted the first doc:

```python
del all_docs[0]
```

```python
all_docs
```

```
[Document(page_content='doggy', metadata={'source': 'doggy.txt'})]
```

Using full mode will clean up the deleted content as well.

```python
index(all_docs, record_manager, vectorstore, cleanup="full", source_id_key="source")
```

```
{'num_added': 0, 'num_updated': 0, 'num_skipped': 1, 'num_deleted': 1}
```

Source
------

The metadata attribute contains a field called `source`. This source should be pointing at the _ultimate_ provenance associated with the given document.

For example, if these documents are representing chunks of some parent document, the `source` for both documents should be the same and reference the parent document.

In general, `source` should always be specified. Only use a `None`, if you **never** intend to use `incremental` mode, and for some reason can't specify the `source` field correctly.

```python
from langchain_text_splitters import CharacterTextSplitter
```

**API Reference:** [CharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.CharacterTextSplitter.html)

```python
doc1 = Document(
    page_content="kitty kitty kitty kitty kitty", metadata={"source": "kitty.txt"}
)
doc2 = Document(page_content="doggy doggy the doggy", metadata={"source": "doggy.txt"})
```

```python
new_docs = CharacterTextSplitter(
    separator="t", keep_separator=True, chunk_size=12, chunk_overlap=2
).split_documents([doc1, doc2])
new_docs
```

```
[Document(page_content='kitty kit', metadata={'source': 'kitty.txt'}),
 Document(page_content='tty kitty ki', metadata={'source': 'kitty.txt'}),
 Document(page_content='tty kitty', metadata={'source': 'kitty.txt'}),
 Document(page_content='doggy doggy', metadata={'source': 'doggy.txt'}),
 Document(page_content='the doggy', metadata={'source': 'doggy.txt'})]
```

```python
_clear()
```

```python
index(
    new_docs,
    record_manager,
    vectorstore,
    cleanup="incremental",
    source_id_key="source",
)
```

```
{'num_added': 5, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 0}
```

```python
changed_doggy_docs = [
    Document(page_content="woof woof", metadata={"source": "doggy.txt"}),
    Document(page_content="woof woof woof", metadata={"source": "doggy.txt"}),
]
```

This should delete the old versions of documents associated with the `doggy.txt` source and replace them with the new versions.

```python
index(
    changed_doggy_docs,
    record_manager,
    vectorstore,
    cleanup="incremental",
    source_id_key="source",
)
```

```
{'num_added': 2, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 2}
```

```python
vectorstore.similarity_search("dog", k=30)
```

```
[Document(page_content='woof woof', metadata={'source': 'doggy.txt'}),
 Document(page_content='woof woof woof', metadata={'source': 'doggy.txt'}),
 Document(page_content='tty kitty', metadata={'source': 'kitty.txt'}),
 Document(page_content='tty kitty ki', metadata={'source': 'kitty.txt'}),
 Document(page_content='kitty kit', metadata={'source': 'kitty.txt'})]
```

Using with loaders
------------------

Indexing can accept either an iterable of documents or else any loader.

**Attention:** The loader **must** set source keys correctly.

```python
from langchain_core.document_loaders import BaseLoader


class MyCustomLoader(BaseLoader):
    def lazy_load(self):
        text_splitter = CharacterTextSplitter(
            separator="t", keep_separator=True, chunk_size=12, chunk_overlap=2
        )
        docs = [
            Document(page_content="woof woof", metadata={"source": "doggy.txt"}),
            Document(page_content="woof woof woof", metadata={"source": "doggy.txt"}),
        ]
        yield from text_splitter.split_documents(docs)

    def load(self):
        return list(self.lazy_load())
```

**API Reference:** [BaseLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_core.document_loaders.base.BaseLoader.html)

```python
_clear()
```

```python
loader = MyCustomLoader()
```

```python
loader.load()
```

```
[Document(page_content='woof woof', metadata={'source': 'doggy.txt'}),
 Document(page_content='woof woof woof', metadata={'source': 'doggy.txt'})]
```

```python
index(loader, record_manager, vectorstore, cleanup="full", source_id_key="source")
```

```
{'num_added': 2, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 0}
```

```python
vectorstore.similarity_search("dog", k=30)
```

```
[Document(page_content='woof woof', metadata={'source': 'doggy.txt'}),
 Document(page_content='woof woof woof', metadata={'source': 'doggy.txt'})]
```
* * *

Introduction
============

**LangChain** is a framework for developing applications powered by large language models (LLMs).

LangChain simplifies every stage of the LLM application lifecycle:

* **Development**: Build your applications using LangChain's open-source [building blocks](/v0.1/docs/expression_language/) and [components](/v0.1/docs/modules/). Hit the ground running using [third-party integrations](/v0.1/docs/integrations/platforms/) and [Templates](/v0.1/docs/templates/).
* **Productionization**: Use [LangSmith](/v0.1/docs/langsmith/) to inspect, monitor and evaluate your chains, so that you can continuously optimize and deploy with confidence.
* **Deployment**: Turn any chain into an API with [LangServe](/v0.1/docs/langserve/).

![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](/v0.1/svg/langchain_stack.svg "LangChain Framework Overview")

Concretely, the framework consists of the following open-source libraries:

* **`langchain-core`**: Base abstractions and LangChain Expression Language.
* **`langchain-community`**: Third party integrations.
  * Partner packages (e.g. **`langchain-openai`**, **`langchain-anthropic`**, etc.): Some integrations have been further split into their own lightweight packages that only depend on **`langchain-core`**.
* **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
* **[langgraph](https://langchain-ai.github.io/langgraph/)**: Build robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
* **[langserve](/v0.1/docs/langserve/)**: Deploy LangChain chains as REST APIs.

The broader ecosystem includes:

* **[LangSmith](/v0.1/docs/langsmith/)**: A developer platform that lets you debug, test, evaluate, and monitor LLM applications and seamlessly integrates with LangChain.

Get started
-----------

We recommend following our [Quickstart](/v0.1/docs/get_started/quickstart/) guide to familiarize yourself with the framework by building your first LangChain application.

[See here](/v0.1/docs/get_started/installation/) for instructions on how to install LangChain, set up your environment, and start building.

**note**

These docs focus on the Python LangChain library. [Head here](https://js.langchain.com) for docs on the JavaScript LangChain library.

Use cases
---------

If you're looking to build something specific or are more of a hands-on learner, check out our [use-cases](/v0.1/docs/use_cases/). They're walkthroughs and techniques for common end-to-end tasks, such as:

* [Question answering with RAG](/v0.1/docs/use_cases/question_answering/)
* [Extracting structured output](/v0.1/docs/use_cases/extraction/)
* [Chatbots](/v0.1/docs/use_cases/chatbots/)
* and more!

Expression Language
-------------------

LangChain Expression Language (LCEL) is the foundation of many of LangChain's components, and is a declarative way to compose chains. LCEL was designed from day 1 to support putting prototypes in production, with no code changes, from the simplest "prompt + LLM" chain to the most complex chains.

* **[Get started](/v0.1/docs/expression_language/)**: LCEL and its benefits
* **[Runnable interface](/v0.1/docs/expression_language/interface/)**: The standard interface for LCEL objects
* **[Primitives](/v0.1/docs/expression_language/primitives/)**: More on the primitives LCEL includes
* and more!

Ecosystem
---------

### [🦜🛠️ LangSmith](/v0.1/docs/langsmith/)

Trace and evaluate your language model applications and intelligent agents to help you move from prototype to production.

### [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph/)

Build stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain primitives.

### [🦜🏓 LangServe](/v0.1/docs/langserve/)

Deploy LangChain runnables and chains as REST APIs.

[Security](/v0.1/docs/security/)
--------------------------------

Read up on our [Security](/v0.1/docs/security/) best practices to make sure you're developing safely with LangChain.

Additional resources
--------------------

### [Components](/v0.1/docs/modules/)

LangChain provides standard, extendable interfaces and integrations for many different components.

### [Integrations](/v0.1/docs/integrations/providers/)

LangChain is part of a rich ecosystem of tools that integrate with our framework and build on top of it. Check out our growing list of [integrations](/v0.1/docs/integrations/providers/).

### [Guides](/v0.1/docs/guides/)

Best practices for developing with LangChain.

### [API reference](https://api.python.langchain.com)

Head to the reference section for full documentation of all classes and methods in the LangChain and LangChain Experimental Python packages.

### [Contributing](/v0.1/docs/contributing/)

Check out the developer's guide for guidelines on contributing and help getting your dev environment set up.
* * *

LangChain Expression Language Cheatsheet
========================================

This is a quick reference for all the most important LCEL primitives. For more advanced usage see the [LCEL how-to guides](/v0.2/docs/how_to/#langchain-expression-language-lcel) and the [full API reference](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html).

### Invoke a runnable

#### [Runnable.invoke()](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.invoke) / [Runnable.ainvoke()](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.ainvoke)

```python
from langchain_core.runnables import RunnableLambda

runnable = RunnableLambda(lambda x: str(x))
runnable.invoke(5)

# Async variant:
# await runnable.ainvoke(5)
```

**API Reference:** [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html)

```
'5'
```

### Batch a runnable

#### [Runnable.batch()](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.batch) / [Runnable.abatch()](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.abatch)

```python
from langchain_core.runnables import RunnableLambda

runnable = RunnableLambda(lambda x: str(x))
runnable.batch([7, 8, 9])

# Async variant:
# await runnable.abatch([7, 8, 9])
```

**API Reference:** [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html)

```
['7', '8', '9']
```

### Stream a runnable

#### [Runnable.stream()](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) / [Runnable.astream()](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream)

```python
from langchain_core.runnables import RunnableLambda


def func(x):
    for y in x:
        yield str(y)


runnable = RunnableLambda(func)

for chunk in runnable.stream(range(5)):
    print(chunk)

# Async variant:
# async for chunk in await runnable.astream(range(5)):
#     print(chunk)
```

**API Reference:** [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html)

```
0
1
2
3
4
```

### Compose runnables

#### Pipe operator `|`

```python
from langchain_core.runnables import RunnableLambda

runnable1 = RunnableLambda(lambda x: {"foo": x})
runnable2 = RunnableLambda(lambda x: [x] * 2)

chain = runnable1 | runnable2

chain.invoke(2)
```

**API Reference:** [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html)

```
[{'foo': 2}, {'foo': 2}]
```

### Invoke runnables in parallel

#### [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html)

```python
from langchain_core.runnables import RunnableLambda, RunnableParallel

runnable1 = RunnableLambda(lambda x: {"foo": x})
runnable2 = RunnableLambda(lambda x: [x] * 2)

chain = RunnableParallel(first=runnable1, second=runnable2)

chain.invoke(2)
```

**API Reference:** [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html)

```
{'first': {'foo': 2}, 'second': [2, 2]}
```

### Turn any function into a runnable

#### [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html)

```python
from langchain_core.runnables import RunnableLambda


def func(x):
    return x + 5


runnable = RunnableLambda(func)
runnable.invoke(2)
```

**API Reference:** [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html)

```
7
```

### Merge input and output dicts

#### [RunnablePassthrough.assign](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html)

```python
from langchain_core.runnables import RunnableLambda, RunnablePassthrough

runnable1 = RunnableLambda(lambda x: x["foo"] + 7)

chain = RunnablePassthrough.assign(bar=runnable1)

chain.invoke({"foo": 10})
```

**API Reference:** [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html)

```
{'foo': 10, 'bar': 17}
```

### Include input dict in output dict

#### [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html)

```python
from langchain_core.runnables import (
    RunnableLambda,
    RunnableParallel,
    RunnablePassthrough,
)

runnable1 = RunnableLambda(lambda x: x["foo"] + 7)

chain = RunnableParallel(bar=runnable1, baz=RunnablePassthrough())

chain.invoke({"foo": 10})
```

**API Reference:** [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html)

```
{'bar': 17, 'baz': {'foo': 10}}
```

### Add default invocation args

#### [Runnable.bind](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.bind)

```python
from typing import Optional

from langchain_core.runnables import RunnableLambda


def func(main_arg: dict, other_arg: Optional[str] = None) -> dict:
    if other_arg:
        return {**main_arg, **{"foo": other_arg}}
    return main_arg


runnable1 = RunnableLambda(func)
bound_runnable1 = runnable1.bind(other_arg="bye")

bound_runnable1.invoke({"bar": "hello"})
```

**API Reference:** [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html)

```
{'bar': 'hello', 'foo': 'bye'}
```

### Add fallbacks

#### [Runnable.with_fallbacks](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_fallbacks)

```python
from langchain_core.runnables import RunnableLambda

runnable1 = RunnableLambda(lambda x: x + "foo")
runnable2 = RunnableLambda(lambda x: str(x) + "foo")

chain = runnable1.with_fallbacks([runnable2])

chain.invoke(5)
```

**API Reference:** [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html)

```
'5foo'
```

### Add retries

#### [Runnable.with_retry](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_retry)

```python
from langchain_core.runnables import RunnableLambda

counter = -1


def func(x):
    global counter
    counter += 1
    print(f"attempt with {counter=}")
    return x / counter


chain = RunnableLambda(func).with_retry(stop_after_attempt=2)

chain.invoke(2)
```

**API Reference:** [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html)

```
attempt with counter=0
attempt with counter=1
```

```
2.0
```

### Configure runnable execution

#### [RunnableConfig](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.config.RunnableConfig.html)

```python
from langchain_core.runnables import RunnableLambda, RunnableParallel

runnable1 = RunnableLambda(lambda x: {"foo": x})
runnable2 = RunnableLambda(lambda x: [x] * 2)
runnable3 = RunnableLambda(lambda x: str(x))

chain = RunnableParallel(first=runnable1, second=runnable2, third=runnable3)

chain.invoke(7, config={"max_concurrency": 2})
```

**API Reference:** [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html)

```
{'first': {'foo': 7}, 'second': [7, 7], 'third': '7'}
```

### Add default config to runnable

#### [Runnable.with_config](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config)

```python
from langchain_core.runnables import RunnableLambda, RunnableParallel

runnable1 = RunnableLambda(lambda x: {"foo": x})
runnable2 = RunnableLambda(lambda x: [x] * 2)
runnable3 = RunnableLambda(lambda x: str(x))

chain = RunnableParallel(first=runnable1, second=runnable2, third=runnable3)
configured_chain = chain.with_config(max_concurrency=2)

chain.invoke(7)
```

**API Reference:** [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html)

```
{'first': {'foo': 7}, 'second': [7, 7], 'third': '7'}
```

### Make runnable attributes configurable

#### [Runnable.with_configurable_fields](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableSerializable.html#langchain_core.runnables.base.RunnableSerializable.configurable_fields)

```python
from typing import Any, Optional

from langchain_core.runnables import (
    ConfigurableField,
    RunnableConfig,
    RunnableSerializable,
)


class FooRunnable(RunnableSerializable[dict, dict]):
    output_key: str

    def invoke(
        self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any
    ) -> list:
        return self._call_with_config(self.subtract_seven, input, config, **kwargs)

    def subtract_seven(self, input: dict) -> dict:
        return {self.output_key: input["foo"] - 7}


runnable1 = FooRunnable(output_key="bar")
configurable_runnable1 = runnable1.configurable_fields(
    output_key=ConfigurableField(id="output_key")
)

configurable_runnable1.invoke(
    {"foo": 10}, config={"configurable": {"output_key": "not bar"}}
)
```

**API Reference:** [ConfigurableField](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.utils.ConfigurableField.html) |
[RunnableConfig](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.config.RunnableConfig.html) | [RunnableSerializable](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableSerializable.html)\n\n {'not bar': 3}\n\n configurable_runnable1.invoke({\"foo\": 10})\n\n {'bar': 3}\n\n### Make chain components configurable[​](#make-chain-components-configurable \"Direct link to Make chain components configurable\")\n\n#### [Runnable.with\\_configurable\\_alternatives](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableSerializable.html#langchain_core.runnables.base.RunnableSerializable.configurable_alternatives)[​](#runnablewith_configurable_alternatives \"Direct link to runnablewith_configurable_alternatives\")\n\n from typing import Any, Optionalfrom langchain_core.runnables import RunnableConfig, RunnableLambda, RunnableParallelclass ListRunnable(RunnableSerializable[Any, list]): def invoke( self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any ) -> list: return self._call_with_config(self.listify, input, config, **kwargs) def listify(self, input: Any) -> list: return [input]class StrRunnable(RunnableSerializable[Any, str]): def invoke( self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any ) -> list: return self._call_with_config(self.strify, input, config, **kwargs) def strify(self, input: Any) -> str: return str(input)runnable1 = RunnableLambda(lambda x: {\"foo\": x})configurable_runnable = ListRunnable().configurable_alternatives( ConfigurableField(id=\"second_step\"), default_key=\"list\", string=StrRunnable())chain = runnable1 | configurable_runnablechain.invoke(7, config={\"configurable\": {\"second_step\": \"string\"}})\n\n**API Reference:**[RunnableConfig](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.config.RunnableConfig.html) | [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html)\n\n \"{'foo': 7}\"\n\n chain.invoke(7)\n\n [{'foo': 7}]\n\n### Build a chain dynamically based on input[​](#build-a-chain-dynamically-based-on-input \"Direct link to Build a chain dynamically based on input\")\n\n from langchain_core.runnables import RunnableLambda, RunnableParallelrunnable1 = RunnableLambda(lambda x: {\"foo\": x})runnable2 = RunnableLambda(lambda x: [x] * 2)chain = RunnableLambda(lambda x: runnable1 if x > 6 else runnable2)chain.invoke(7)\n\n**API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html)\n\n {'foo': 7}\n\n chain.invoke(5)\n\n [5, 5]\n\n### Generate a stream of events[​](#generate-a-stream-of-events \"Direct link to Generate a stream of events\")\n\n#### [Runnable.astream\\_events](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream_events)[​](#runnableastream_events \"Direct link to runnableastream_events\")\n\n # | echo: falseimport nest_asyncionest_asyncio.apply()\n\n from langchain_core.runnables import RunnableLambda, RunnableParallelrunnable1 = RunnableLambda(lambda x: {\"foo\": x}, name=\"first\")async def func(x): 
for _ in range(5): yield xrunnable2 = RunnableLambda(func, name=\"second\")chain = runnable1 | runnable2async for event in chain.astream_events(\"bar\", version=\"v2\"): print(f\"event={event['event']} | name={event['name']} | data={event['data']}\")\n\n**API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html)\n\n event=on_chain_start | name=RunnableSequence | data={'input': 'bar'}event=on_chain_start | name=first | data={}event=on_chain_stream | name=first | data={'chunk': {'foo': 'bar'}}event=on_chain_start | name=second | data={}event=on_chain_end | name=first | data={'output': {'foo': 'bar'}, 'input': 'bar'}event=on_chain_stream | name=second | data={'chunk': {'foo': 'bar'}}event=on_chain_stream | name=RunnableSequence | data={'chunk': {'foo': 'bar'}}event=on_chain_stream | name=second | data={'chunk': {'foo': 'bar'}}event=on_chain_stream | name=RunnableSequence | data={'chunk': {'foo': 'bar'}}event=on_chain_stream | name=second | data={'chunk': {'foo': 'bar'}}event=on_chain_stream | name=RunnableSequence | data={'chunk': {'foo': 'bar'}}event=on_chain_stream | name=second | data={'chunk': {'foo': 'bar'}}event=on_chain_stream | name=RunnableSequence | data={'chunk': {'foo': 'bar'}}event=on_chain_stream | name=second | data={'chunk': {'foo': 'bar'}}event=on_chain_stream | name=RunnableSequence | data={'chunk': {'foo': 'bar'}}event=on_chain_end | name=second | data={'output': {'foo': 'bar'}, 'input': {'foo': 'bar'}}event=on_chain_end | name=RunnableSequence | data={'output': {'foo': 'bar'}}\n\n### Yield batched outputs as they complete[​](#yield-batched-outputs-as-they-complete \"Direct link to Yield batched outputs as they complete\")\n\n#### [Runnable.batch\\_as\\_completed](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.batch_as_completed) / [Runnable.abatch\\_as\\_completed](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.abatch_as_completed)[​](#runnablebatch_as_completed--runnableabatch_as_completed \"Direct link to runnablebatch_as_completed--runnableabatch_as_completed\")\n\n import timefrom langchain_core.runnables import RunnableLambda, RunnableParallelrunnable1 = RunnableLambda(lambda x: time.sleep(x) or print(f\"slept {x}\"))for idx, result in runnable1.batch_as_completed([5, 1]): print(idx, result)# Async variant:# async for idx, result in runnable1.abatch_as_completed([5, 1]):# print(idx, result)\n\n**API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html)\n\n slept 11 Noneslept 50 None\n\n### Return subset of output dict[​](#return-subset-of-output-dict \"Direct link to Return subset of output dict\")\n\n#### [Runnable.pick](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.pick)[​](#runnablepick \"Direct link to runnablepick\")\n\n from langchain_core.runnables import RunnableLambda, RunnablePassthroughrunnable1 = RunnableLambda(lambda x: x[\"baz\"] + 5)chain = 
RunnablePassthrough.assign(foo=runnable1).pick([\"foo\", \"bar\"])chain.invoke({\"bar\": \"hi\", \"baz\": 2})\n\n**API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html)\n\n {'foo': 7, 'bar': 'hi'}\n\n### Declaratively make a batched version of a runnable[​](#declaratively-make-a-batched-version-of-a-runnable \"Direct link to Declaratively make a batched version of a runnable\")\n\n#### [Runnable.map](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.map)[​](#runnablemap \"Direct link to runnablemap\")\n\n from langchain_core.runnables import RunnableLambdarunnable1 = RunnableLambda(lambda x: list(range(x)))runnable2 = RunnableLambda(lambda x: x + 5)chain = runnable1 | runnable2.map()chain.invoke(3)\n\n**API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html)\n\n [5, 6, 7]\n\n### Get a graph representation of a runnable[​](#get-a-graph-representation-of-a-runnable \"Direct link to Get a graph representation of a runnable\")\n\n#### [Runnable.get\\_graph](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.get_graph)[​](#runnableget_graph \"Direct link to runnableget_graph\")\n\n from langchain_core.runnables import RunnableLambda, RunnableParallelrunnable1 = RunnableLambda(lambda x: {\"foo\": x})runnable2 = RunnableLambda(lambda x: [x] * 2)runnable3 = RunnableLambda(lambda x: str(x))chain = runnable1 | RunnableParallel(second=runnable2, third=runnable3)chain.get_graph().print_ascii()\n\n**API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html)\n\n +-------------+ | LambdaInput | +-------------+ * * * +------------------------------+ | Lambda(lambda x: {'foo': x}) | +------------------------------+ * * * +-----------------------------+ | ParallelInput | +-----------------------------+ **** *** **** **** ** ** +---------------------------+ +--------------------------+ | Lambda(lambda x: [x] * 2) | | Lambda(lambda x: str(x)) | +---------------------------+ +--------------------------+ **** *** **** **** ** ** +------------------------------+ | ParallelOutput | +------------------------------+\n\n### Get all prompts in a chain[​](#get-all-prompts-in-a-chain \"Direct link to Get all prompts in a chain\")\n\n#### [Runnable.get\\_prompts](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.get_prompts)[​](#runnableget_prompts \"Direct link to runnableget_prompts\")\n\n from langchain_core.prompts import ChatPromptTemplatefrom langchain_core.runnables import RunnableLambdaprompt1 = ChatPromptTemplate.from_messages( [(\"system\", \"good ai\"), (\"human\", \"{input}\")])prompt2 = ChatPromptTemplate.from_messages( [ (\"system\", \"really good ai\"), (\"human\", \"{input}\"), (\"ai\", \"{ai_output}\"), (\"human\", \"{input2}\"), ])fake_llm = RunnableLambda(lambda prompt: \"i am good ai\")chain = prompt1.assign(ai_output=fake_llm) | prompt2 | 
fake_llmfor i, prompt in enumerate(chain.get_prompts()): print(f\"**prompt {i=}**\\n\") print(prompt.pretty_repr()) print(\"\\n\" * 3)\n\n**API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html)\n\n **prompt i=0**================================ System Message ================================good ai================================ Human Message ================================={input}**prompt i=1**================================ System Message ================================really good ai================================ Human Message ================================={input}================================== AI Message =================================={ai_output}================================ Human Message ================================={input2}\n\n### Add lifecycle listeners[​](#add-lifecycle-listeners \"Direct link to Add lifecycle listeners\")\n\n#### [Runnable.with\\_listeners](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_listeners)[​](#runnablewith_listeners \"Direct link to runnablewith_listeners\")\n\n import timefrom langchain_core.runnables import RunnableLambdafrom langchain_core.tracers.schemas import Rundef on_start(run_obj: Run): print(\"start_time:\", run_obj.start_time)def on_end(run_obj: Run): print(\"end_time:\", run_obj.end_time)runnable1 = RunnableLambda(lambda x: time.sleep(x))chain = runnable1.with_listeners(on_start=on_start, on_end=on_end)chain.invoke(2)\n\n**API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [Run](https://api.python.langchain.com/en/latest/tracers/langchain_core.tracers.schemas.Run.html)\n\n start_time: 2024-05-17 23:04:00.951065+00:00end_time: 2024-05-17 23:04:02.958765+00:00\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/lcel_cheatsheet.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to inspect runnables\n\n](/v0.2/docs/how_to/inspect/)[\n\nNext\n\nHow to cache LLM responses\n\n](/v0.2/docs/how_to/llm_caching/)\n\n* [Invoke a runnable](#invoke-a-runnable)\n* [Batch a runnable](#batch-a-runnable)\n* [Stream a runnable](#stream-a-runnable)\n* [Compose runnables](#compose-runnables)\n* [Invoke runnables in parallel](#invoke-runnables-in-parallel)\n* [Turn any function into a runnable](#turn-any-function-into-a-runnable)\n* [Merge input and output dicts](#merge-input-and-output-dicts)\n* [Include input dict in output dict](#include-input-dict-in-output-dict)\n* [Add default invocation args](#add-default-invocation-args)\n* [Add fallbacks](#add-fallbacks)\n* [Add retries](#add-retries)\n* [Configure runnable execution](#configure-runnable-execution)\n* [Add default config to runnable](#add-default-config-to-runnable)\n* [Make runnable attributes configurable](#make-runnable-attributes-configurable)\n* [Make chain components configurable](#make-chain-components-configurable)\n* [Build a chain dynamically based on 
input](#build-a-chain-dynamically-based-on-input)
* [Generate a stream of events](#generate-a-stream-of-events)
* [Yield batched outputs as they complete](#yield-batched-outputs-as-they-complete)
* [Return subset of output dict](#return-subset-of-output-dict)
* [Declaratively make a batched version of a runnable](#declaratively-make-a-batched-version-of-a-runnable)
* [Get a graph representation of a runnable](#get-a-graph-representation-of-a-runnable)
* [Get all prompts in a chain](#get-all-prompts-in-a-chain)
* [Add lifecycle listeners](#add-lifecycle-listeners)

* * *

https://python.langchain.com/v0.2/docs/how_to/llm_token_usage_tracking/

How to track token usage for LLMs
=================================

Tracking token usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls.

Prerequisites

This guide assumes familiarity with the following concepts:

* [LLMs](/v0.2/docs/concepts/#llms)

Using LangSmith
---------------------------------------------------------------------

You can use [LangSmith](https://www.langchain.com/langsmith) to help track token usage in your LLM application. See the [LangSmith quick start guide](https://docs.smith.langchain.com/).

Using callbacks
---------------------------------------------------------------------

There are some API-specific callback context managers that allow you to track token usage across multiple calls. You'll need to check whether such an integration is available for your particular model.

If such an integration is not available for your model, you can create a custom callback manager by adapting the implementation of the [OpenAI callback manager](https://api.python.langchain.com/en/latest/_modules/langchain_community/callbacks/openai_info.html#OpenAICallbackHandler).
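For example, a minimal sketch of such a custom handler might look like the following. This is not an official LangChain API: the class name `UsageTallyHandler` is invented for illustration, and the `token_usage` keys read from `llm_output` follow the OpenAI convention, so other providers may report usage under different keys or not at all.

```python
from typing import Any

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult
from langchain_openai import OpenAI


class UsageTallyHandler(BaseCallbackHandler):
    """Hypothetical handler that tallies provider-reported token usage."""

    def __init__(self) -> None:
        self.total_tokens = 0
        self.prompt_tokens = 0
        self.completion_tokens = 0

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        # Providers that report usage usually attach it to `llm_output`;
        # the key names below follow the OpenAI convention and may differ
        # (or be absent) for other providers.
        usage = (response.llm_output or {}).get("token_usage", {})
        self.total_tokens += usage.get("total_tokens", 0)
        self.prompt_tokens += usage.get("prompt_tokens", 0)
        self.completion_tokens += usage.get("completion_tokens", 0)


handler = UsageTallyHandler()
llm = OpenAI(model_name="gpt-3.5-turbo-instruct")
llm.invoke("Tell me a joke", config={"callbacks": [handler]})
print(f"Total Tokens: {handler.total_tokens}")
```

The handler is passed via `config={"callbacks": [...]}` on any invocation. Because the numbers come from the provider response rather than a local tokenizer, this sketch inherits the same limitation as `get_openai_callback`: it reports nothing for providers that omit usage metadata.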
### OpenAI

Let's first look at an extremely simple example of tracking token usage for a single LLM call.

danger

The callback handler does not currently support streaming token counts for legacy language models (e.g., `langchain_openai.OpenAI`). For support in a streaming context, refer to the corresponding guide for chat models [here](/v0.2/docs/how_to/chat_token_usage_tracking/).

### Single call

    from langchain_community.callbacks import get_openai_callback
    from langchain_openai import OpenAI

    llm = OpenAI(model_name="gpt-3.5-turbo-instruct")

    with get_openai_callback() as cb:
        result = llm.invoke("Tell me a joke")
        print(result)
        print("---")
    print()
    print(f"Total Tokens: {cb.total_tokens}")
    print(f"Prompt Tokens: {cb.prompt_tokens}")
    print(f"Completion Tokens: {cb.completion_tokens}")
    print(f"Total Cost (USD): ${cb.total_cost}")

**API Reference:**[get\_openai\_callback](https://api.python.langchain.com/en/latest/callbacks/langchain_community.callbacks.manager.get_openai_callback.html) | [OpenAI](https://api.python.langchain.com/en/latest/llms/langchain_openai.llms.base.OpenAI.html)

    Why don't scientists trust atoms?
    Because they make up everything.
    ---

    Total Tokens: 18
    Prompt Tokens: 4
    Completion Tokens: 14
    Total Cost (USD): $3.4e-05

### Multiple calls

Anything inside the context manager will get tracked. Here's an example of using it to track multiple calls in sequence to a chain. This will also work for an agent which may use multiple steps.

    from langchain_community.callbacks import get_openai_callback
    from langchain_core.prompts import PromptTemplate
    from langchain_openai import OpenAI

    llm = OpenAI(model_name="gpt-3.5-turbo-instruct")
    template = PromptTemplate.from_template("Tell me a joke about {topic}")
    chain = template | llm

    with get_openai_callback() as cb:
        response = chain.invoke({"topic": "birds"})
        print(response)
        response = chain.invoke({"topic": "fish"})
        print("--")
        print(response)
    print()
    print("---")
    print(f"Total Tokens: {cb.total_tokens}")
    print(f"Prompt Tokens: {cb.prompt_tokens}")
    print(f"Completion Tokens: {cb.completion_tokens}")
    print(f"Total Cost (USD): ${cb.total_cost}")

**API Reference:**[get\_openai\_callback](https://api.python.langchain.com/en/latest/callbacks/langchain_community.callbacks.manager.get_openai_callback.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) | [OpenAI](https://api.python.langchain.com/en/latest/llms/langchain_openai.llms.base.OpenAI.html)

    Why did the chicken go to the seance?
    To talk to the other side of the road!
    --
    Why did the fish need a lawyer?
    Because it got caught in a net!

    ---
    Total Tokens: 50
    Prompt Tokens: 12
    Completion Tokens: 38
    Total Cost (USD): $9.400000000000001e-05
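As noted in the Streaming section below, `get_openai_callback` does not update its counts when you stream from a legacy LLM, and one of the options listed there is to count tokens yourself in a custom callback handler. A rough sketch of that idea follows; the class name is invented for this example, and `tiktoken`'s `cl100k_base` encoding is only an approximation of the provider's tokenizer.

```python
from typing import Any

import tiktoken
from langchain_core.callbacks import BaseCallbackHandler
from langchain_openai import OpenAI


class StreamingTokenCounter(BaseCallbackHandler):
    """Hypothetical handler that counts tokens locally with a tokenizer."""

    def __init__(self, encoding_name: str = "cl100k_base") -> None:
        self.encoding = tiktoken.get_encoding(encoding_name)
        self.prompt_tokens = 0
        self.completion_tokens = 0

    def on_llm_start(
        self, serialized: dict, prompts: list[str], **kwargs: Any
    ) -> None:
        # Count prompt tokens up front, before any chunks arrive.
        self.prompt_tokens += sum(len(self.encoding.encode(p)) for p in prompts)

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        # Each streamed chunk is re-tokenized, so this is an approximation:
        # chunk boundaries do not always line up with tokenizer boundaries.
        self.completion_tokens += len(self.encoding.encode(token))


counter = StreamingTokenCounter()
llm = OpenAI(model_name="gpt-3.5-turbo-instruct")
for chunk in llm.stream("Tell me a joke", config={"callbacks": [counter]}):
    pass
print(counter.prompt_tokens, counter.completion_tokens)
```

Because the streamed chunks are re-tokenized locally, the completion count can drift slightly from what the provider would bill; treat it as an estimate rather than an exact figure.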
Streaming
---------------------------------------------------

danger

`get_openai_callback` does not currently support streaming token counts for legacy language models (e.g., `langchain_openai.OpenAI`). If you want to count tokens correctly in a streaming context, there are a number of options:

* Use chat models as described in [this guide](/v0.2/docs/how_to/chat_token_usage_tracking/);
* Implement a [custom callback handler](/v0.2/docs/how_to/custom_callbacks/) that uses appropriate tokenizers to count the tokens;
* Use a monitoring platform such as [LangSmith](https://www.langchain.com/langsmith).

Note that when using legacy language models in a streaming context, token counts are not updated:

    from langchain_community.callbacks import get_openai_callback
    from langchain_openai import OpenAI

    llm = OpenAI(model_name="gpt-3.5-turbo-instruct")

    with get_openai_callback() as cb:
        for chunk in llm.stream("Tell me a joke"):
            print(chunk, end="", flush=True)
        # `result` still holds the output of the single-call example above,
        # so this prints the earlier joke a second time.
        print(result)
        print("---")
    print()
    print(f"Total Tokens: {cb.total_tokens}")
    print(f"Prompt Tokens: {cb.prompt_tokens}")
    print(f"Completion Tokens: {cb.completion_tokens}")
    print(f"Total Cost (USD): ${cb.total_cost}")

**API Reference:**[get\_openai\_callback](https://api.python.langchain.com/en/latest/callbacks/langchain_community.callbacks.manager.get_openai_callback.html) | [OpenAI](https://api.python.langchain.com/en/latest/llms/langchain_openai.llms.base.OpenAI.html)

    Why don't scientists trust atoms?
    Because they make up everything!
    Why don't scientists trust atoms?
    Because they make up everything.
    ---

    Total Tokens: 0
    Prompt Tokens: 0
    Completion Tokens: 0
    Total Cost (USD): $0.0

* * *

https://python.langchain.com/v0.2/docs/contributing/

Welcome Contributors
====================

Hi there! Thank you for even being interested in contributing to LangChain. As an open-source project in a rapidly developing field, we are extremely open to contributions, whether they involve new features, improved infrastructure, better documentation, or bug fixes.

🗺️ Guidelines
----------------------------------------------------------------

### 👩‍💻 Ways to contribute

There are many ways to contribute to LangChain. 
Here are some common ways people contribute:\n\n* [**Documentation**](/v0.2/docs/contributing/documentation/style_guide/): Help improve our docs, including this one!\n* [**Code**](/v0.2/docs/contributing/code/): Help us write code, fix bugs, or improve our infrastructure.\n* [**Integrations**](/v0.2/docs/contributing/integrations/): Help us integrate with your favorite vendors and tools.\n* [**Discussions**](https://github.com/langchain-ai/langchain/discussions): Help answer usage questions and discuss issues with users.\n\n### 🚩 GitHub Issues[​](#-github-issues \"Direct link to 🚩 GitHub Issues\")\n\nOur [issues](https://github.com/langchain-ai/langchain/issues) page is kept up to date with bugs, improvements, and feature requests.\n\nThere is a taxonomy of labels to help with sorting and discovery of issues of interest. Please use these to help organize issues.\n\nIf you start working on an issue, please assign it to yourself.\n\nIf you are adding an issue, please try to keep it focused on a single, modular bug/improvement/feature. If two issues are related, or blocking, please link them rather than combining them.\n\nWe will try to keep these issues as up-to-date as possible, though with the rapid rate of development in this field some may get out of date. If you notice this happening, please let us know.\n\n### 💭 GitHub Discussions[​](#-github-discussions \"Direct link to 💭 GitHub Discussions\")\n\nWe have a [discussions](https://github.com/langchain-ai/langchain/discussions) page where users can ask usage questions, discuss design decisions, and propose new features.\n\nIf you are able to help answer questions, please do so! This will allow the maintainers to spend more time focused on development and bug fixing.\n\n### 🙋 Getting Help[​](#-getting-help \"Direct link to 🙋 Getting Help\")\n\nOur goal is to have the simplest developer setup possible. Should you experience any difficulty getting setup, please contact a maintainer! Not only do we want to help get you unblocked, but we also want to make sure that the process is smooth for future contributors.\n\nIn a similar vein, we do enforce certain linting, formatting, and documentation standards in the codebase. If you are finding these difficult (or even just annoying) to work with, feel free to contact a maintainer for help - we do not want these to get in the way of getting good code into the codebase.\n\n### 🌟 Recognition[​](#-recognition \"Direct link to 🌟 Recognition\")\n\nIf your contribution has made its way into a release, we will want to give you credit on Twitter (only if you want though)! 
If you have a Twitter account you would like us to mention, please let us know in the PR or through another means.\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/contributing/index.mdx)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nNext\n\nRepository Structure\n\n](/v0.2/docs/contributing/repo_structure/)\n\n* [🗺️ Guidelines](#️-guidelines)\n * [👩‍💻 Ways to contribute](#-ways-to-contribute)\n * [🚩 GitHub Issues](#-github-issues)\n * [💭 GitHub Discussions](#-github-discussions)\n * [🙋 Getting Help](#-getting-help)\n * [🌟 Recognition](#-recognition)"},"last_modified":{"kind":"null"}}},{"rowIdx":1377,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/additional_resources/tutorials/"},"markdown":{"kind":"string","value":"On this page\n\n3rd Party Tutorials\n===================\n\nTutorials[​](#tutorials \"Direct link to Tutorials\")\n---------------------------------------------------\n\n### [LangChain v 0.1 by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae0gBSJ9T0w7cu7iJZbH3T31)[​](#langchain-v-01-by-langchainai \"Direct link to langchain-v-01-by-langchainai\")\n\n### [Build with Langchain - Advanced by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae06tclDATrMYY0idsTdLg9v)[​](#build-with-langchain---advanced-by-langchainai \"Direct link to build-with-langchain---advanced-by-langchainai\")\n\n### [LangGraph by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae16n2TWUkKq5PgJ0w6Pkwtg)[​](#langgraph-by-langchainai \"Direct link to langgraph-by-langchainai\")\n\n### [by Greg Kamradt](https://www.youtube.com/playlist?list=PLqZXAkvF1bPNQER9mLmDbntNfSpzdDIU5)[​](#by-greg-kamradt \"Direct link to by-greg-kamradt\")\n\n### [by Sam Witteveen](https://www.youtube.com/playlist?list=PL8motc6AQftk1Bs42EW45kwYbyJ4jOdiZ)[​](#by-sam-witteveen \"Direct link to by-sam-witteveen\")\n\n### [by James Briggs](https://www.youtube.com/playlist?list=PLIUOU7oqGTLieV9uTIFMm6_4PXg-hlN6F)[​](#by-james-briggs \"Direct link to by-james-briggs\")\n\n### [by Prompt Engineering](https://www.youtube.com/playlist?list=PLVEEucA9MYhOu89CX8H3MBZqayTbcCTMr)[​](#by-prompt-engineering \"Direct link to by-prompt-engineering\")\n\n### [by Mayo Oshin](https://www.youtube.com/@chatwithdata/search?query=langchain)[​](#by-mayo-oshin \"Direct link to by-mayo-oshin\")\n\n### [by 1 little Coder](https://www.youtube.com/playlist?list=PLpdmBGJ6ELUK-v0MK-t4wZmVEbxM5xk6L)[​](#by-1-little-coder \"Direct link to by-1-little-coder\")\n\n### [by BobLin (Chinese language)](https://www.youtube.com/playlist?list=PLbd7ntv6PxC3QMFQvtWfk55p-Op_syO1C)[​](#by-boblin-chinese-language \"Direct link to by-boblin-chinese-language\")\n\nCourses[​](#courses \"Direct link to Courses\")\n---------------------------------------------\n\n### Featured courses on Deeplearning.AI[​](#featured-courses-on-deeplearningai \"Direct link to Featured courses on Deeplearning.AI\")\n\n* [LangChain for LLM Application Development](https://www.deeplearning.ai/short-courses/langchain-for-llm-application-development/)\n* [LangChain Chat with Your Data](https://www.deeplearning.ai/short-courses/langchain-chat-with-your-data/)\n* [Functions, Tools and Agents with 
LangChain](https://www.deeplearning.ai/short-courses/functions-tools-agents-langchain/)\n* [Build LLM Apps with LangChain.js](https://www.deeplearning.ai/short-courses/build-llm-apps-with-langchain-js/)\n\n### Online courses[​](#online-courses \"Direct link to Online courses\")\n\n* [Udemy](https://www.udemy.com/courses/search/?q=langchain)\n* [DataCamp](https://www.datacamp.com/courses/developing-llm-applications-with-langchain)\n* [Pluralsight](https://www.pluralsight.com/search?q=langchain)\n* [Coursera](https://www.coursera.org/search?query=langchain)\n* [Maven](https://maven.com/courses?query=langchain)\n* [Udacity](https://www.udacity.com/catalog/all/any-price/any-school/any-skill/any-difficulty/any-duration/any-type/relevance/page-1?searchValue=langchain)\n* [LinkedIn Learning](https://www.linkedin.com/search/results/learning/?keywords=langchain)\n* [edX](https://www.edx.org/search?q=langchain)\n* [freeCodeCamp](https://www.youtube.com/@freecodecamp/search?query=langchain)\n\nShort Tutorials[​](#short-tutorials \"Direct link to Short Tutorials\")\n---------------------------------------------------------------------\n\n* [by Nicholas Renotte](https://youtu.be/MlK6SIjcjE8)\n* [by Patrick Loeber](https://youtu.be/LbT1yp6quS8)\n* [by Rabbitmetrics](https://youtu.be/aywZrzNaKjs)\n* [by Ivan Reznikov](https://medium.com/@ivanreznikov/langchain-101-course-updated-668f7b41d6cb)\n\nBooks and Handbooks[​](#books-and-handbooks \"Direct link to Books and Handbooks\")\n---------------------------------------------------------------------------------\n\n* [Generative AI with LangChain](https://www.amazon.com/Generative-AI-LangChain-language-ChatGPT/dp/1835083463/ref=sr_1_1?crid=1GMOMH0G7GLR&keywords=generative+ai+with+langchain&qid=1703247181&sprefix=%2Caps%2C298&sr=8-1) by [Ben Auffrath](https://www.amazon.com/stores/Ben-Auffarth/author/B08JQKSZ7D?ref=ap_rdr&store_ref=ap_rdr&isDramIntegrated=true&shoppingPortalEnabled=true), ©️ 2023 Packt Publishing\n* [LangChain AI Handbook](https://www.pinecone.io/learn/langchain/) By **James Briggs** and **Francisco Ingham**\n* [LangChain Cheatsheet](https://pub.towardsai.net/langchain-cheatsheet-all-secrets-on-a-single-page-8be26b721cde) by **Ivan Reznikov**\n* [Dive into Langchain (Chinese language)](https://langchain.boblin.app/)\n\n* * *\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/additional_resources/tutorials.mdx)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n* [Tutorials](#tutorials)\n * [LangChain v 0.1 by LangChain.ai](#langchain-v-01-by-langchainai)\n * [Build with Langchain - Advanced by LangChain.ai](#build-with-langchain---advanced-by-langchainai)\n * [LangGraph by LangChain.ai](#langgraph-by-langchainai)\n * [by Greg Kamradt](#by-greg-kamradt)\n * [by Sam Witteveen](#by-sam-witteveen)\n * [by James Briggs](#by-james-briggs)\n * [by Prompt Engineering](#by-prompt-engineering)\n * [by Mayo Oshin](#by-mayo-oshin)\n * [by 1 little Coder](#by-1-little-coder)\n * [by BobLin (Chinese language)](#by-boblin-chinese-language)\n* [Courses](#courses)\n * [Featured courses on Deeplearning.AI](#featured-courses-on-deeplearningai)\n * [Online courses](#online-courses)\n* [Short Tutorials](#short-tutorials)\n* [Books and 
Handbooks](#books-and-handbooks)"},"last_modified":{"kind":"null"}}},{"rowIdx":1378,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/templates/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* Templates\n\nOn this page\n\nTemplates\n=========\n\nHighlighting a few different categories of templates\n\n⭐ Popular[​](#-popular \"Direct link to ⭐ Popular\")\n--------------------------------------------------\n\nThese are some of the more popular templates to get started with.\n\n* [Retrieval Augmented Generation Chatbot](/v0.2/docs/templates/rag-conversation/): Build a chatbot over your data. Defaults to OpenAI and PineconeVectorStore.\n* [Extraction with OpenAI Functions](/v0.2/docs/templates/extraction-openai-functions/): Do extraction of structured data from unstructured data. Uses OpenAI function calling.\n* [Local Retrieval Augmented Generation](/v0.2/docs/templates/rag-chroma-private/): Build a chatbot over your data. Uses only local tooling: Ollama, GPT4all, Chroma.\n* [OpenAI Functions Agent](/v0.2/docs/templates/openai-functions-agent/): Build a chatbot that can take actions. Uses OpenAI function calling and Tavily.\n* [XML Agent](/v0.2/docs/templates/xml-agent/): Build a chatbot that can take actions. Uses Anthropic and You.com.\n\n📥 Advanced Retrieval[​](#-advanced-retrieval \"Direct link to 📥 Advanced Retrieval\")\n-------------------------------------------------------------------------------------\n\nThese templates cover advanced retrieval techniques, which can be used for chat and QA over databases or documents.\n\n* [Reranking](/v0.2/docs/templates/rag-pinecone-rerank/): This retrieval technique uses Cohere's reranking endpoint to rerank documents from an initial retrieval step.\n* [Anthropic Iterative Search](/v0.2/docs/templates/anthropic-iterative-search/): This retrieval technique uses iterative prompting to determine what to retrieve and whether the retriever documents are good enough.\n* **Parent Document Retrieval** using [Neo4j](/v0.2/docs/templates/neo4j-parent/) or [MongoDB](/v0.2/docs/templates/mongo-parent-document-retrieval/): This retrieval technique stores embeddings for smaller chunks, but then returns larger chunks to pass to the model for generation.\n* [Semi-Structured RAG](/v0.2/docs/templates/rag-semi-structured/): The template shows how to do retrieval over semi-structured data (e.g. data that involves both text and tables).\n* [Temporal RAG](/v0.2/docs/templates/rag-timescale-hybrid-search-time/): The template shows how to do hybrid search over data with a time-based component using [Timescale Vector](https://www.timescale.com/ai?utm_campaign=vectorlaunch&utm_source=langchain&utm_medium=referral).\n\n🔍Advanced Retrieval - Query Transformation[​](#advanced-retrieval---query-transformation \"Direct link to 🔍Advanced Retrieval - Query Transformation\")\n-------------------------------------------------------------------------------------------------------------------------------------------------------\n\nA selection of advanced retrieval methods that involve transforming the original user query, which can improve retrieval quality.\n\n* [Hypothetical Document Embeddings](/v0.2/docs/templates/hyde/): A retrieval technique that generates a hypothetical document for a given query, and then uses the embedding of that document to do semantic search. 
[Paper](https://arxiv.org/abs/2212.10496).\n* [Rewrite-Retrieve-Read](/v0.2/docs/templates/rewrite-retrieve-read/): A retrieval technique that rewrites a given query before passing it to a search engine. [Paper](https://arxiv.org/abs/2305.14283).\n* [Step-back QA Prompting](/v0.2/docs/templates/stepback-qa-prompting/): A retrieval technique that generates a \"step-back\" question and then retrieves documents relevant to both that question and the original question. [Paper](https://arxiv.org/abs//2310.06117).\n* [RAG-Fusion](/v0.2/docs/templates/rag-fusion/): A retrieval technique that generates multiple queries and then reranks the retrieved documents using reciprocal rank fusion. [Article](https://towardsdatascience.com/forget-rag-the-future-is-rag-fusion-1147298d8ad1).\n* [Multi-Query Retriever](/v0.2/docs/templates/rag-pinecone-multi-query/): This retrieval technique uses an LLM to generate multiple queries and then fetches documents for all queries.\n\n🧠Advanced Retrieval - Query Construction[​](#advanced-retrieval---query-construction \"Direct link to 🧠Advanced Retrieval - Query Construction\")\n-------------------------------------------------------------------------------------------------------------------------------------------------\n\nA selection of advanced retrieval methods that involve constructing a query in a separate DSL from natural language, which enable natural language chat over various structured databases.\n\n* [Elastic Query Generator](/v0.2/docs/templates/elastic-query-generator/): Generate elastic search queries from natural language.\n* [Neo4j Cypher Generation](/v0.2/docs/templates/neo4j-cypher/): Generate cypher statements from natural language. Available with a [\"full text\" option](/v0.2/docs/templates/neo4j-cypher-ft/) as well.\n* [Supabase Self Query](/v0.2/docs/templates/self-query-supabase/): Parse a natural language query into a semantic query as well as a metadata filter for Supabase.\n\n🦙 OSS Models[​](#-oss-models \"Direct link to 🦙 OSS Models\")\n-------------------------------------------------------------\n\nThese templates use OSS models, which enable privacy for sensitive data.\n\n* [Local Retrieval Augmented Generation](/v0.2/docs/templates/rag-chroma-private/): Build a chatbot over your data. 
Uses only local tooling: Ollama, GPT4all, Chroma.\n* [SQL Question Answering (Replicate)](/v0.2/docs/templates/sql-llama2/): Question answering over a SQL database, using Llama2 hosted on [Replicate](https://replicate.com/).\n* [SQL Question Answering (LlamaCpp)](/v0.2/docs/templates/sql-llamacpp/): Question answering over a SQL database, using Llama2 through [LlamaCpp](https://github.com/ggerganov/llama.cpp).\n* [SQL Question Answering (Ollama)](/v0.2/docs/templates/sql-ollama/): Question answering over a SQL database, using Llama2 through [Ollama](https://github.com/jmorganca/ollama).\n\n⛏️ Extraction[​](#️-extraction \"Direct link to ⛏️ Extraction\")\n--------------------------------------------------------------\n\nThese templates extract data in a structured format based upon a user-specified schema.\n\n* [Extraction Using OpenAI Functions](/v0.2/docs/templates/extraction-openai-functions/): Extract information from text using OpenAI Function Calling.\n* [Extraction Using Anthropic Functions](/v0.2/docs/templates/extraction-anthropic-functions/): Extract information from text using a LangChain wrapper around the Anthropic endpoints intended to simulate function calling.\n* [Extract BioTech Plate Data](/v0.2/docs/templates/plate-chain/): Extract microplate data from messy Excel spreadsheets into a more normalized format.\n\n⛏️Summarization and tagging[​](#️summarization-and-tagging \"Direct link to ⛏️Summarization and tagging\")\n--------------------------------------------------------------------------------------------------------\n\nThese templates summarize or categorize documents and text.\n\n* [Summarization using Anthropic](/v0.2/docs/templates/summarize-anthropic/): Uses Anthropic's Claude2 to summarize long documents.\n\n🤖 Agents[​](#-agents \"Direct link to 🤖 Agents\")\n-------------------------------------------------\n\nThese templates build chatbots that can take actions, helping to automate tasks.\n\n* [OpenAI Functions Agent](/v0.2/docs/templates/openai-functions-agent/): Build a chatbot that can take actions. Uses OpenAI function calling and Tavily.\n* [XML Agent](/v0.2/docs/templates/xml-agent/): Build a chatbot that can take actions. 
Uses Anthropic and You.com.\n\n🚨 Safety and evaluation[​](#-safety-and-evaluation \"Direct link to 🚨 Safety and evaluation\")\n----------------------------------------------------------------------------------------------\n\nThese templates enable moderation or evaluation of LLM outputs.\n\n* [Guardrails Output Parser](/v0.2/docs/templates/guardrails-output-parser/): Use guardrails-ai to validate LLM output.\n* [Chatbot Feedback](/v0.2/docs/templates/chat-bot-feedback/): Use LangSmith to evaluate chatbot responses.\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nNext\n\nanthropic-iterative-search\n\n](/v0.2/docs/templates/anthropic-iterative-search/)\n\n* [⭐ Popular](#-popular)\n* [📥 Advanced Retrieval](#-advanced-retrieval)\n* [🔍Advanced Retrieval - Query Transformation](#advanced-retrieval---query-transformation)\n* [🧠Advanced Retrieval - Query Construction](#advanced-retrieval---query-construction)\n* [🦙 OSS Models](#-oss-models)\n* [⛏️ Extraction](#️-extraction)\n* [⛏️Summarization and tagging](#️summarization-and-tagging)\n* [🤖 Agents](#-agents)\n* [🚨 Safety and evaluation](#-safety-and-evaluation)"},"last_modified":{"kind":"null"}}},{"rowIdx":1379,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/people/"},"markdown":{"kind":"string","value":"People\n======\n\nThere are some incredible humans from all over the world who have been instrumental in helping the LangChain community flourish 🌐!\n\nThis page highlights a few of those folks who have dedicated their time to the open-source repo in the form of direct contributions and reviews.\n\nTop reviewers[​](#top-reviewers \"Direct link to Top reviewers\")\n---------------------------------------------------------------\n\nAs LangChain has grown, the amount of surface area that maintainers cover has grown as well.\n\nThank you to the following folks who have gone above and beyond in reviewing incoming PRs 
🙏!\n\n[![](https://avatars.githubusercontent.com/u/2256422?v=4)](https://github.com/leo-gan)[@leo-gan](https://github.com/leo-gan)\n\n[![](https://avatars.githubusercontent.com/u/11026406?v=4)](https://github.com/lkuligin)[@lkuligin](https://github.com/lkuligin)\n\n[![](https://avatars.githubusercontent.com/u/11633333?u=e13817e11b3fb8c3d209d747c77a0f0742d11138&v=4)](https://github.com/cbornet)[@cbornet](https://github.com/cbornet)\n\n[![](https://avatars.githubusercontent.com/u/289369?u=80655eb5f9a4d03bf1a526b07a67adc6eacccc6b&v=4)](https://github.com/3coins)[@3coins](https://github.com/3coins)\n\n[![](https://avatars.githubusercontent.com/u/48236177?u=757490c6af76be0a8837dd5886991005a23c89c7&v=4)](https://github.com/liugddx)[@liugddx](https://github.com/liugddx)\n\n[![](https://avatars.githubusercontent.com/u/49480?u=4a9b7c8820211aae14da7f72f617d88019a06569&v=4)](https://github.com/joemcelroy)[@joemcelroy](https://github.com/joemcelroy)\n\n[![](https://avatars.githubusercontent.com/u/67427?v=4)](https://github.com/jexp)[@jexp](https://github.com/jexp)\n\n[![](https://avatars.githubusercontent.com/u/44113430?u=34bdaacaeb2880e40fb4b07897c481771c6de544&v=4)](https://github.com/mspronesti)[@mspronesti](https://github.com/mspronesti)\n\n[![](https://avatars.githubusercontent.com/u/8429627?u=d28653fbd93c966ac840f93a05f0ef949495851f&v=4)](https://github.com/JohnNay)[@JohnNay](https://github.com/JohnNay)\n\n[![](https://avatars.githubusercontent.com/u/749277?u=84aeb7b75146a67f8b18b389dc591ba72ef105e4&v=4)](https://github.com/tjaffri)[@tjaffri](https://github.com/tjaffri)\n\n[![](https://avatars.githubusercontent.com/u/72488598?u=98dc24a63369cbae14913caff5f379f80f305aab&v=4)](https://github.com/Undertone0809)[@Undertone0809](https://github.com/Undertone0809)\n\n[![](https://avatars.githubusercontent.com/u/6690839?u=e56c2161ddc98c58b01fb82da4076e5400fb1e6d&v=4)](https://github.com/sjwhitmore)[@sjwhitmore](https://github.com/sjwhitmore)\n\n[![](https://avatars.githubusercontent.com/u/13262395?u=430eff10dfbb7d3f27a35f1ea2c9ea6a61067c88&v=4)](https://github.com/holtskinner)[@holtskinner](https://github.com/holtskinner)\n\n[![](https://avatars.githubusercontent.com/u/19948365?v=4)](https://github.com/tomasonjo)[@tomasonjo](https://github.com/tomasonjo)\n\n[![](https://avatars.githubusercontent.com/u/62768671?u=279f772a5b8325a191a1a8bb623aa40f32a01856&v=4)](https://github.com/skcoirz)[@skcoirz](https://github.com/skcoirz)\n\n[![](https://avatars.githubusercontent.com/u/20304844?u=f00461bcedad6ba384a4e234a44c906802448b4e&v=4)](https://github.com/tylerhutcherson)[@tylerhutcherson](https://github.com/tylerhutcherson)\n\n[![](https://avatars.githubusercontent.com/u/13009163?u=c2b3a11cceaadbc9415f545b971250c9e2b2078b&v=4)](https://github.com/Spartee)[@Spartee](https://github.com/Spartee)\n\n[![](https://avatars.githubusercontent.com/u/19181718?u=79a9013dea28a7fa654431cd7e89b08dc76434dd&v=4)](https://github.com/sepiatone)[@sepiatone](https://github.com/sepiatone)\n\n[![](https://avatars.githubusercontent.com/u/123224380?v=4)](https://github.com/scadEfUr)[@scadEfUr](https://github.com/scadEfUr)\n\n[![](https://avatars.githubusercontent.com/u/1635179?u=0631cb84ca580089198114f94d9c27efe730220e&v=4)](https://github.com/MthwRobinson)[@MthwRobinson](https://github.com/MthwRobinson)\n\n[![](https://avatars.githubusercontent.com/u/891664?u=722172a0061f68ab22819fa88a354ec973f70a63&v=4)](https://github.com/jeffchuber)[@jeffchuber](https://github.com/jeffchuber)\n\n[![](https://avatars.githubusercontent.com/u/2649301?u=5
e688d2b90ddcafd5028a9da292010144cad6d18&v=4)](https://github.com/kacperlukawski)[@kacperlukawski](https://github.com/kacperlukawski)\n\n[![](https://avatars.githubusercontent.com/u/25930426?v=4)](https://github.com/pranjaldoshi96)[@pranjaldoshi96](https://github.com/pranjaldoshi96)\n\n[![](https://avatars.githubusercontent.com/u/46051506?u=026f5f140e8b7ba4744bf971f9ebdea9ebab67ca&v=4)](https://github.com/Anush008)[@Anush008](https://github.com/Anush008)\n\n[![](https://avatars.githubusercontent.com/u/23314389?u=2014e20e246530fa89bd902fe703b6f9e6ecf833&v=4)](https://github.com/nicoloboschi)[@nicoloboschi](https://github.com/nicoloboschi)\n\n[![](https://avatars.githubusercontent.com/u/5015933?u=80e339672a321cde25f4b484129bbddfefb2356d&v=4)](https://github.com/ShaneHarvey)[@ShaneHarvey](https://github.com/ShaneHarvey)\n\n[![](https://avatars.githubusercontent.com/u/13749212?u=b58700c3bd236e880223bccba53b7ad0dd4d7003&v=4)](https://github.com/eavanvalkenburg)[@eavanvalkenburg](https://github.com/eavanvalkenburg)\n\n[![](https://avatars.githubusercontent.com/u/1097932?u=0e9c1cc9e2c02469e52963322344af181464bf43&v=4)](https://github.com/gengliangwang)[@gengliangwang](https://github.com/gengliangwang)\n\n[![](https://avatars.githubusercontent.com/u/39497902?u=0c1597698c6f28da87d80ac0de9c8276d5ab63e9&v=4)](https://github.com/dbczumar)[@dbczumar](https://github.com/dbczumar)\n\n[![](https://avatars.githubusercontent.com/u/17039389?u=796226152becf82c4d7fd5cc49a24e58a73ce66f&v=4)](https://github.com/harupy)[@harupy](https://github.com/harupy)\n\n[![](https://avatars.githubusercontent.com/u/251292?u=a7465aae734d2cbc12d26b885b07d466d969bf0c&v=4)](https://github.com/jmorganca)[@jmorganca](https://github.com/jmorganca)\n\n[![](https://avatars.githubusercontent.com/u/2096628?u=2a4822ff8dc6b4f1162c58716d48fdfac08c8601&v=4)](https://github.com/blink1073)[@blink1073](https://github.com/blink1073)\n\n[![](https://avatars.githubusercontent.com/u/14221764?u=47a1405343b4d92caed3744e82dda1d28d01a251&v=4)](https://github.com/hemidactylus)[@hemidactylus](https://github.com/hemidactylus)\n\n[![](https://avatars.githubusercontent.com/u/101075607?v=4)](https://github.com/andersenchen)[@andersenchen](https://github.com/andersenchen)\n\n[![](https://avatars.githubusercontent.com/u/43734688?u=78f139fa940620e301361a58821c9f56128f71d9&v=4)](https://github.com/sam-h-bean)[@sam-h-bean](https://github.com/sam-h-bean)\n\n[![](https://avatars.githubusercontent.com/u/20311743?u=29bf2391ae34297a12a88d813731b0bdf289e4a5&v=4)](https://github.com/nickscamara)[@nickscamara](https://github.com/nickscamara)\n\n[![](https://avatars.githubusercontent.com/u/89161683?u=4a59b199c77215fe3cb8c937797b909061ec49af&v=4)](https://github.com/naveentatikonda)[@naveentatikonda](https://github.com/naveentatikonda)\n\n[![](https://avatars.githubusercontent.com/u/24217337?u=09d0e274f382e264ef578e93b547fb55a5b179fe&v=4)](https://github.com/kylehh)[@kylehh](https://github.com/kylehh)\n\n[![](https://avatars.githubusercontent.com/u/6162415?u=82e86c06ae37add3750f9db9ad9d7dfa250ddae7&v=4)](https://github.com/navneet1v)[@navneet1v](https://github.com/navneet1v)\n\n[![](https://avatars.githubusercontent.com/u/851520?u=21c6d8ef697fd32a8020d81269e155a24cb081ac&v=4)](https://github.com/maxjakob)[@maxjakob](https://github.com/maxjakob)\n\n[![](https://avatars.githubusercontent.com/u/2887713?u=7bb198c7d11d29a412dc836818f3da6666f643ee&v=4)](https://github.com/Jibola)[@Jibola](https://github.com/Jibola)\n\nTop recent contributors[​](#top-recent-contributors \"Direct 
link to Top recent contributors")
---------------------------------------------------------------------------------------------

The list below contains contributors who have had the most PRs merged in the last three months, weighted (imperfectly) by impact.

Thank you all so much for your time and efforts in making LangChain better ❤️!

[@leo-gan](https://github.com/leo-gan)
[@cbornet](https://github.com/cbornet)
[@tomasonjo](https://github.com/tomasonjo)
[@sepiatone](https://github.com/sepiatone)
[@liugddx](https://github.com/liugddx)
[@maxjakob](https://github.com/maxjakob)
[@lkuligin](https://github.com/lkuligin)
[@harry-cohere](https://github.com/harry-cohere)
[@OpenVINO-dev-contest](https://github.com/OpenVINO-dev-contest)
[@jhpiedrahitao](https://github.com/jhpiedrahitao)
[@Jibola](https://github.com/Jibola)
[@Adi8885](https://github.com/Adi8885)
[@billytrend-cohere](https://github.com/billytrend-cohere)
[@MateuszOssGit](https://github.com/MateuszOssGit)
[@Josephasafg](https://github.com/Josephasafg)
[@rahul-trip](https://github.com/rahul-trip)
[@junkeon](https://github.com/junkeon)
[@maximeperrindev](https://github.com/maximeperrindev)
[@keenborder786](https://github.com/keenborder786)
[@sfvaroglu](https://github.com/sfvaroglu)

Core maintainers[​](#core-maintainers "Direct link to Core maintainers")
------------------------------------------------------------------------

Hello there 👋!

We're LangChain's core maintainers. If you've spent time in the community, you've probably crossed paths with at least one of us already.

[@efriis](https://github.com/efriis)
[@ccurme](https://github.com/ccurme)
[@agola11](https://github.com/agola11)
[@baskaryan](https://github.com/baskaryan)
[@vbarda](https://github.com/vbarda)
[@eyurtsev](https://github.com/eyurtsev)
[@nfcampos](https://github.com/nfcampos)
[@rlancemartin](https://github.com/rlancemartin)
[@hinthornw](https://github.com/hinthornw)
[@hwchase17](https://github.com/hwchase17)

Top all-time contributors[​](#top-all-time-contributors "Direct link to Top all-time contributors")
---------------------------------------------------------------------------------------------------

And finally, this is an all-time list of all-stars who have made significant contributions to the framework 🌟:
[@leo-gan](https://github.com/leo-gan)
[@cbornet](https://github.com/cbornet)
[@tomasonjo](https://github.com/tomasonjo)
[@lkuligin](https://github.com/lkuligin)
[@MthwRobinson](https://github.com/MthwRobinson)
[@kacperlukawski](https://github.com/kacperlukawski)
[@hemidactylus](https://github.com/hemidactylus)
[@timothyasp](https://github.com/timothyasp)
[@sjwhitmore](https://github.com/sjwhitmore)
[@3coins](https://github.com/3coins)
[@mbchang](https://github.com/mbchang)
[@danielchalef](https://github.com/danielchalef)
[@liugddx](https://github.com/liugddx)
[@mspronesti](https://github.com/mspronesti)
[@chyroc](https://github.com/chyroc)
[@eavanvalkenburg](https://github.com/eavanvalkenburg)
[@shibuiwilliam](https://github.com/shibuiwilliam)
[@holtskinner](https://github.com/holtskinner)
[@sepiatone](https://github.com/sepiatone)
[@MateuszOssGit](https://github.com/MateuszOssGit)
[@fpingham](https://github.com/fpingham)
[@169](https://github.com/169)
[@tjaffri](https://github.com/tjaffri)
[@billytrend-cohere](https://github.com/billytrend-cohere)
[@nickscamara](https://github.com/nickscamara)
[@maxjakob](https://github.com/maxjakob)
[@keenborder786](https://github.com/keenborder786)
[@maks-operlejn-ds](https://github.com/maks-operlejn-ds)
[@ofermend](https://github.com/ofermend)
[@sergerdn](https://github.com/sergerdn)
[@volodymyr-memsql](https://github.com/volodymyr-memsql)
[@Jibola](https://github.com/Jibola)
[@averikitsch](https://github.com/averikitsch)
[@naveentatikonda](https://github.com/naveentatikonda)
[@tyumentsev4](https://github.com/tyumentsev4)
[@UmerHA](https://github.com/UmerHA)
[@joshuasundance-swca](https://github.com/joshuasundance-swca)
[@adolkhan](https://github.com/adolkhan)
[@seamusp](https://github.com/seamusp)
[@michaelfeil](https://github.com/michaelfeil)
[@Adi8885](https://github.com/Adi8885)
[@blob42](https://github.com/blob42)
[@malandis](https://github.com/malandis)
[@mpskex](https://github.com/mpskex)
[@davidbuniat](https://github.com/davidbuniat)
[@ShreyaR](https://github.com/ShreyaR)
[@maiqingqiang](https://github.com/maiqingqiang)
[@tylerhutcherson](https://github.com/tylerhutcherson)
[@skcoirz](https://github.com/skcoirz)
[@OpenVINO-dev-contest](https://github.com/OpenVINO-dev-contest)
[@jamesbraza](https://github.com/jamesbraza)
[@manuel-soria](https://github.com/manuel-soria)
[@CG80499](https://github.com/CG80499)
[@outday29](https://github.com/outday29)
[@harry-cohere](https://github.com/harry-cohere)
[@GMartin-dev](https://github.com/GMartin-dev)
[@ljeagle](https://github.com/ljeagle)
[@Anush008](https://github.com/Anush008)
[@joemcelroy](https://github.com/joemcelroy)
[@wangxuqi](https://github.com/wangxuqi)
[@virattt](https://github.com/virattt)
[@gengliangwang](https://github.com/gengliangwang)
[@jzluo](https://github.com/jzluo)
[@Josephasafg](https://github.com/Josephasafg)
[@IANTHEREAL](https://github.com/IANTHEREAL)
[@mateusz-wosinski-ds](https://github.com/mateusz-wosinski-ds)
[@Jped](https://github.com/Jped)
[@hughcrt](https://github.com/hughcrt)
[@cs0lar](https://github.com/cs0lar)
[@ShorthillsAI](https://github.com/ShorthillsAI)
[@harupy](https://github.com/harupy)
[@lalanikarim](https://github.com/lalanikarim)
[@jhpiedrahitao](https://github.com/jhpiedrahitao)
[@kylehh](https://github.com/kylehh)
[@jeffvestal](https://github.com/jeffvestal)
[@VKudlay](https://github.com/VKudlay)
[@conceptofmind](https://github.com/conceptofmind)
[@ruoccofabrizio](https://github.com/ruoccofabrizio)
[@axiangcoding](https://github.com/axiangcoding)
[@homanp](https://github.com/homanp)
[@yakigac](https://github.com/yakigac)
[@pprados](https://github.com/pprados)
[@zc277584121](https://github.com/zc277584121)
[@HunterGerlach](https://github.com/HunterGerlach)
[@eltociear](https://github.com/eltociear)
[@gkorland](https://github.com/gkorland)
[@Gordon-BP](https://github.com/Gordon-BP)
[@kzk-maeda](https://github.com/kzk-maeda)
[@saginawj](https://github.com/saginawj)
[@filip-halt](https://github.com/filip-halt)
[@skozlovf](https://github.com/skozlovf)
[@zachschillaci27](https://github.com/zachschillaci27)
[@nelly-hateva](https://github.com/nelly-hateva)
[@rahul-trip](https://github.com/rahul-trip)
[@wemysschen](https://github.com/wemysschen)
[@zanderchase](https://github.com/zanderchase)
[@danielhjz](https://github.com/danielhjz)
[@os1ma](https://github.com/os1ma)
[@cevian](https://github.com/cevian)
[@charliermarsh](https://github.com/charliermarsh)
[@maximeperrindev](https://github.com/maximeperrindev)
[@mackong](https://github.com/mackong)
[@bborn](https://github.com/bborn)
[@junkeon](https://github.com/junkeon)
[@jj701](https://github.com/jj701)
[@cauwulixuan](https://github.com/cauwulixuan)
[@markcusack](https://github.com/markcusack)
[@delip](https://github.com/delip)
[@ichernev](https://github.com/ichernev)
[@kennethchoe](https://github.com/kennethchoe)
[@amiaxys](https://github.com/amiaxys)
[@jeffchuber](https://github.com/jeffchuber)
[@shane-huang](https://github.com/shane-huang)
[@cbh123](https://github.com/cbh123)
[@sdelgadoc](https://github.com/sdelgadoc)
[@MartinKolbAtWork](https://github.com/MartinKolbAtWork)
[@jirimoravcik](https://github.com/jirimoravcik)
[@kitrak-rev](https://github.com/kitrak-rev)
[@tazarov](https://github.com/tazarov)
[@parambharat](https://github.com/parambharat)
[@baichuan-assistant](https://github.com/baichuan-assistant)
[@sfvaroglu](https://github.com/sfvaroglu)
[@sfc-gh-jcarroll](https://github.com/sfc-gh-jcarroll)
[@jeffzwang](https://github.com/jeffzwang)
[@BeatrixCohere](https://github.com/BeatrixCohere)
[@P-E-B](https://github.com/P-E-B)
[@chadj2](https://github.com/chadj2)
[@sam-h-bean](https://github.com/sam-h-bean)
[@williamdevena](https://github.com/williamdevena)
[@filip-michalsky](https://github.com/filip-michalsky)
[@k8si](https://github.com/k8si)
[@alexsherstinsky](https://github.com/alexsherstinsky)
[@edwardzjl](https://github.com/edwardzjl)
[@Nutlope](https://github.com/Nutlope)
[@paul-paliychuk](https://github.com/paul-paliychuk)
[@gregnr](https://github.com/gregnr)
[@asamant21](https://github.com/asamant21)
[@sudranga](https://github.com/sudranga)
[@sseide](https://github.com/sseide)
[@scottnath](https://github.com/scottnath)
[@AI-Bassem](https://github.com/AI-Bassem)
[@BeautyyuYanli](https://github.com/BeautyyuYanli)
[@dglogo](https://github.com/dglogo)
[@gradenr](https://github.com/gradenr)
[@rohanaggarwal7997](https://github.com/rohanaggarwal7997)
[@zhaoshengbo](https://github.com/zhaoshengbo)
[@hakantekgul](https://github.com/hakantekgul)
[@eryk-dsai](https://github.com/eryk-dsai)
[@mrtj](https://github.com/mrtj)
[@pcliupc](https://github.com/pcliupc)
[@alvarobartt](https://github.com/alvarobartt)
[@rogerserper](https://github.com/rogerserper)
[@ekzhu](https://github.com/ekzhu)
[@ashleyxuu](https://github.com/ashleyxuu)
[@bhalder](https://github.com/bhalder)
[@mattgotteiner](https://github.com/mattgotteiner)
[@ZixinYang](https://github.com/ZixinYang)
[@nikhilkjha](https://github.com/nikhilkjha)
[@Dominastorm](https://github.com/Dominastorm)
[@raunakshrivastava7](https://github.com/raunakshrivastava7)
[@rodrigo-f-nogueira](https://github.com/rodrigo-f-nogueira)
[@benjibc](https://github.com/benjibc)
[@hoyungcher](https://github.com/hoyungcher)
[@OwenPendrighElliott](https://github.com/OwenPendrighElliott)
[@KyrianC](https://github.com/KyrianC)
[@Mikelarg](https://github.com/Mikelarg)
[@netoferraz](https://github.com/netoferraz)
[@nithishr](https://github.com/nithishr)
[@zizhong](https://github.com/zizhong)
[@amicus-veritatis](https://github.com/amicus-veritatis)
[@MikeNitsenko](https://github.com/MikeNitsenko)
[@liangz1](https://github.com/liangz1)
[@mikelambert](https://github.com/mikelambert)
[@nicoloboschi](https://github.com/nicoloboschi)
[@mkorpela](https://github.com/mkorpela)
[@linancn](https://github.com/linancn)
[@tsg](https://github.com/tsg)
[@am-kinetica](https://github.com/am-kinetica)
[@anar2706](https://github.com/anar2706)
[@yifeis7](https://github.com/yifeis7)
[@whitead](https://github.com/whitead)
[@benitoThree](https://github.com/benitoThree)
[@ruze00](https://github.com/ruze00)
[@raghavdixit99](https://github.com/raghavdixit99)
[@HeChangHaoGary](https://github.com/HeChangHaoGary)
[@xiaoyuxee](https://github.com/xiaoyuxee)
[@jerwelborn](https://github.com/jerwelborn)
[@Anindyadeep](https://github.com/Anindyadeep)
[@vairodp](https://github.com/vairodp)
[@aletna](https://github.com/aletna)
[@hsm207](https://github.com/hsm207)
[@freemso](https://github.com/freemso)
[@DayuanJiang](https://github.com/DayuanJiang)
[@rigazilla](https://github.com/rigazilla)
[@apepkuss](https://github.com/apepkuss)
[@gadhagod](https://github.com/gadhagod)
[@raveharpaz](https://github.com/raveharpaz)
[@matthewdeguzman](https://github.com/matthewdeguzman)
[@Tokkiu](https://github.com/Tokkiu)
[@softboyjimbo](https://github.com/softboyjimbo)
[@zhanghexian](https://github.com/zhanghexian)
[@rajtilakjee](https://github.com/rajtilakjee)
[@ashvardanian](https://github.com/ashvardanian)
[@plv](https://github.com/plv)
[@TomTom101](https://github.com/TomTom101)
[@juliuslipp](https://github.com/juliuslipp)
[@pors](https://github.com/pors)
[@shivanimodi16](https://github.com/shivanimodi16)
[@thomas0809](https://github.com/thomas0809)
[@azamiftikhar1000](https://github.com/azamiftikhar1000)
[@alecf](https://github.com/alecf)
[@ecneladis](https://github.com/ecneladis)
[@Undertone0809](https://github.com/Undertone0809)
[@hetaoBackend](https://github.com/hetaoBackend)
[@herrjemand](https://github.com/herrjemand)
[@lesters](https://github.com/lesters)
[@max-arthurai](https://github.com/max-arthurai)
[@philipkiely-baseten](https://github.com/philipkiely-baseten)
[@schadem](https://github.com/schadem)
[@Aratako](https://github.com/Aratako)
[@anubhav94N](https://github.com/anubhav94N)
[@rithwik-db](https://github.com/rithwik-db)
[@kartheekyakkala](https://github.com/kartheekyakkala)
[@jiayini1119](https://github.com/jiayini1119)
[@aayush3011](https://github.com/aayush3011)
[@shufanhao](https://github.com/shufanhao)
[@zcgeng](https://github.com/zcgeng)
[@ash0ts](https://github.com/ash0ts)
[@Honkware](https://github.com/Honkware)
[@dwhitena](https://github.com/dwhitena)
[@SagarBM396](https://github.com/SagarBM396)
[@jamie256](https://github.com/jamie256)
[@yanghua](https://github.com/yanghua)
[@klein-t](https://github.com/klein-t)
[@erika-cardenas](https://github.com/erika-cardenas)
[@Ayan-Bandyopadhyay](https://github.com/Ayan-Bandyopadhyay)
[@tugot17](https://github.com/tugot17)
[@Spartee](https://github.com/Spartee)
[@Jflick58](https://github.com/Jflick58)
[@JuHyung-Son](https://github.com/JuHyung-Son)
[@stewartjarod](https://github.com/stewartjarod)
[@cxumol](https://github.com/cxumol)
[@rihardsgravis](https://github.com/rihardsgravis)
[@kouroshHakha](https://github.com/kouroshHakha)
[@ByronHsu](https://github.com/ByronHsu)
[@O-Roma](https://github.com/O-Roma)
[@rowillia](https://github.com/rowillia)
[@lesterpjy](https://github.com/lesterpjy)
[@junefish](https://github.com/junefish)
[@2jimoo](https://github.com/2jimoo)
[@petervandenabeele](https://github.com/petervandenabeele)
[@shahrin014](https://github.com/shahrin014)
[@shoelsch](https://github.com/shoelsch)
[@h0rv](https://github.com/h0rv)
[@JoanFM](https://github.com/JoanFM)
[@asai95](https://github.com/asai95)
[@mgoin](https://github.com/mgoin)
[@Blaizzy](https://github.com/Blaizzy)
[@akmhmgc](https://github.com/akmhmgc)
[@gmpetrov](https://github.com/gmpetrov)
[@aarnphm](https://github.com/aarnphm)
[@aMahanna](https://github.com/aMahanna)
[@hp0404](https://github.com/hp0404)
[@liushuaikobe](https://github.com/liushuaikobe)
[@fserv](https://github.com/fserv)
[@seanmavley](https://github.com/seanmavley)
[@cloudscool](https://github.com/cloudscool)
[@Lothiraldan](https://github.com/Lothiraldan)
[@Ather23](https://github.com/Ather23)
[@mogith-pn](https://github.com/mogith-pn)
[@JohnnyDeuss](https://github.com/JohnnyDeuss)
[@dakinggg](https://github.com/dakinggg)
[@jackwotherspoon](https://github.com/jackwotherspoon)
[@philippe2803](https://github.com/philippe2803)
[@wnleao](https://github.com/wnleao)
[@hmasdev](https://github.com/hmasdev)
[@kdcokenny](https://github.com/kdcokenny)
[@qtangs](https://github.com/qtangs)
[@wey-gu](https://github.com/wey-gu)
[@CahidArda](https://github.com/CahidArda)
[@Sukitly](https://github.com/Sukitly)
[@samber](https://github.com/samber)
[@Atry](https://github.com/Atry)
[@chosh0615](https://github.com/chosh0615)
[@avsolatorio](https://github.com/avsolatorio)
[@19374242](https://github.com/19374242)
[@leedotpang](https://github.com/leedotpang)
[@yarikoptic](https://github.com/yarikoptic)
[@Jofthomas](https://github.com/Jofthomas)
[@marlenezw](https://github.com/marlenezw)
[@rancomp](https://github.com/rancomp)
[@morganda](https://github.com/morganda)
[@atroyn](https://github.com/atroyn)
[@dmenini](https://github.com/dmenini)
[@brotchie](https://github.com/brotchie)
[@angeligareta](https://github.com/angeligareta)
[@mmajewsk](https://github.com/mmajewsk)
[@wangwei1237](https://github.com/wangwei1237)
[@nimimeht](https://github.com/nimimeht)
[@alexiri](https://github.com/alexiri)
[@rjanardhan3](https://github.com/rjanardhan3)
[@msaelices](https://github.com/msaelices)
[@SimFG](https://github.com/SimFG)
[@StankoKuveljic](https://github.com/StankoKuveljic)
[@quchuyuan](https://github.com/quchuyuan)
[@serena-ruan](https://github.com/serena-ruan)
[@sirjan-ws-ext](https://github.com/sirjan-ws-ext)
[@anentropic](https://github.com/anentropic)
[@EricLiclair](https://github.com/EricLiclair)
[@hsuyuming](https://github.com/hsuyuming)
[@asofter](https://github.com/asofter)
[@ThatsJustCheesy](https://github.com/ThatsJustCheesy)
[@imeckr](https://github.com/imeckr)
[@rc19](https://github.com/rc19)
[@anthonychu](https://github.com/anthonychu)
[@h3l](https://github.com/h3l)
[@JensMadsen](https://github.com/JensMadsen)
[@Raj725](https://github.com/Raj725)
[@akiradev0x](https://github.com/akiradev0x)
[@fzowl](https://github.com/fzowl)
[@mlejva](https://github.com/mlejva)
[@msetbar](https://github.com/msetbar)
[@DaveDeCaprio](https://github.com/DaveDeCaprio)
[@j-space-b](https://github.com/j-space-b)
[@chrispy-snps](https://github.com/chrispy-snps)
[@amosjyng](https://github.com/amosjyng)
[@cwlacewe](https://github.com/cwlacewe)
[@ninjapenguin](https://github.com/ninjapenguin)
[@dvonthenen](https://github.com/dvonthenen)
[@HamJaw1432](https://github.com/HamJaw1432)
[@cristobalcl](https://github.com/cristobalcl)
[@krrishdholakia](https://github.com/krrishdholakia)
[@samhita-alla](https://github.com/samhita-alla)
[@ralewis85](https://github.com/ralewis85)
[@finnless](https://github.com/finnless)
[@felixocker](https://github.com/felixocker)
[@brendancol](https://github.com/brendancol)
[@juliensalinas](https://github.com/juliensalinas)
[@muntaqamahmood](https://github.com/muntaqamahmood)
[@Fei-Wang](https://github.com/Fei-Wang)
[@jupyterjazz](https://github.com/jupyterjazz)
[@kooyunmo](https://github.com/kooyunmo)
[@donbr](https://github.com/donbr)
[@jdogmcsteezy](https://github.com/jdogmcsteezy)
[@borisdev](https://github.com/borisdev)
[@jasonwcfan](https://github.com/jasonwcfan)
[@kristapratico](https://github.com/kristapratico)
[@yilmaz-burak](https://github.com/yilmaz-burak)
[@yessenzhar](https://github.com/yessenzhar)
[@pjb157](https://github.com/pjb157)
[@krasserm](https://github.com/krasserm)
[@NickL77](https://github.com/NickL77)
[@mishushakov](https://github.com/mishushakov)
[@flash1293](https://github.com/flash1293)
[@Code-Hex](https://github.com/Code-Hex)\n\n[![](https://avatars.githubusercontent.com/u/22690160?u=50f2d8aa99bd7b12c01df29e8ffe519ed1cff1d5&v=4)](https://github.com/jnis23)[@jnis23](https://github.com/jnis23)\n\n[![](https://avatars.githubusercontent.com/u/36752715?u=5137581b52bcbb8466b394f3ba40f97f9e273f52&v=4)](https://github.com/cgalo5758)[@cgalo5758](https://github.com/cgalo5758)\n\n[![](https://avatars.githubusercontent.com/u/17325195?u=dadc287a6784258704affce9bf91e03e1bb967b4&v=4)](https://github.com/raymond-yuan)[@raymond-yuan](https://github.com/raymond-yuan)\n\n[![](https://avatars.githubusercontent.com/u/101966044?v=4)](https://github.com/klae01)[@klae01](https://github.com/klae01)\n\n[![](https://avatars.githubusercontent.com/u/38317983?u=b169467874aeaf478132e46998ca895accfc008e&v=4)](https://github.com/LunarECL)[@LunarECL](https://github.com/LunarECL)\n\n[![](https://avatars.githubusercontent.com/u/12080578?v=4)](https://github.com/whiskyboy)[@whiskyboy](https://github.com/whiskyboy)\n\n[![](https://avatars.githubusercontent.com/u/66191792?v=4)](https://github.com/yuskhan)[@yuskhan](https://github.com/yuskhan)\n\n[![](https://avatars.githubusercontent.com/u/62583018?u=965202caa3cfc09516af257f0affdf4aae7cdd43&v=4)](https://github.com/akashAD98)[@akashAD98](https://github.com/akashAD98)\n\n[![](https://avatars.githubusercontent.com/u/45953733?u=b907b96d62f8cb2e75f3bba4f137d296d0d8a87f&v=4)](https://github.com/Shrined)[@Shrined](https://github.com/Shrined)\n\n[![](https://avatars.githubusercontent.com/u/17435126?u=62bec61ef256194a3bb3ab238ab71d1792decd08&v=4)](https://github.com/DavidLMS)[@DavidLMS](https://github.com/DavidLMS)\n\n[![](https://avatars.githubusercontent.com/u/4956442?u=fee6c76ff991cc9c12c4d703a1ad007e7634f58e&v=4)](https://github.com/rmkraus)[@rmkraus](https://github.com/rmkraus)\n\n[![](https://avatars.githubusercontent.com/u/20266953?u=32853a0ed47a83525f3f21b4baf63891e0e3de15&v=4)](https://github.com/rawwar)[@rawwar](https://github.com/rawwar)\n\n[![](https://avatars.githubusercontent.com/u/413669?u=25b5563194493db00c227a98e23f460adb13c9ea&v=4)](https://github.com/pmcfadin)[@pmcfadin](https://github.com/pmcfadin)\n\n[![](https://avatars.githubusercontent.com/u/25740077?u=1c3b2b59a52f332dc22ef1787f2cdc67dc9fea5e&v=4)](https://github.com/tricktreat)[@tricktreat](https://github.com/tricktreat)\n\n[![](https://avatars.githubusercontent.com/u/6334158?u=1d02d8cc173b20c7d18e11ac20a6f40081025fc3&v=4)](https://github.com/fzliu)[@fzliu](https://github.com/fzliu)\n\n[![](https://avatars.githubusercontent.com/u/15992114?u=39c8ea0ffb9f48cec04f9b473f2801327e716ba1&v=4)](https://github.com/dongreenberg)[@dongreenberg](https://github.com/dongreenberg)\n\n[![](https://avatars.githubusercontent.com/u/54540938?u=77dbfd10b709e203865f99668a4c79db04a69661&v=4)](https://github.com/aledelunap)[@aledelunap](https://github.com/aledelunap)\n\n[![](https://avatars.githubusercontent.com/u/1155052?v=4)](https://github.com/stonekim)[@stonekim](https://github.com/stonekim)\n\n[![](https://avatars.githubusercontent.com/u/6690727?u=d5742c8e658fe211a8987d9716838c34122485d0&v=4)](https://github.com/tonyabracadabra)[@tonyabracadabra](https://github.com/tonyabracadabra)\n\n[![](https://avatars.githubusercontent.com/u/2857712?u=6809bef8bf07c46b39cd2fcd6027ed86e76372cd&v=4)](https://github.com/machulav)[@machulav](https://github.com/machulav)\n\n[![](https://avatars.githubusercontent.com/u/12604876?u=a441926ef7f4dbc48fc3a1511f3ae5cb4279c464&v=4)](https://github.com/shauryr)[@shauryr](https://github.com
/shauryr)\n\n[![](https://avatars.githubusercontent.com/u/42373772?v=4)](https://github.com/PawelFaron)[@PawelFaron](https://github.com/PawelFaron)\n\n[![](https://avatars.githubusercontent.com/u/104267837?u=762d6b00291c68379d66260d7b644942e3bab891&v=4)](https://github.com/lvliang-intel)[@lvliang-intel](https://github.com/lvliang-intel)\n\n[![](https://avatars.githubusercontent.com/u/8972416?u=8cef7c30a819e5157bece1f1e06a50beab52845f&v=4)](https://github.com/xinqiu)[@xinqiu](https://github.com/xinqiu)\n\n[![](https://avatars.githubusercontent.com/u/30035387?u=38717fe5778531ee96e5fc6e4a350668b5024d1c&v=4)](https://github.com/MikeMcGarry)[@MikeMcGarry](https://github.com/MikeMcGarry)\n\n[![](https://avatars.githubusercontent.com/u/20807672?u=f2efe9788ce26442bb3319da1a56081d64c359e5&v=4)](https://github.com/robcaulk)[@robcaulk](https://github.com/robcaulk)\n\n[![](https://avatars.githubusercontent.com/u/37783831?u=5697294c9a0c5bcca4df1aafd22cf8ab64081f2f&v=4)](https://github.com/jagilley)[@jagilley](https://github.com/jagilley)\n\n[![](https://avatars.githubusercontent.com/u/35005448?u=4b6efd3d2dcdc2acde843cff4183b59087f35a9b&v=4)](https://github.com/prrao87)[@prrao87](https://github.com/prrao87)\n\n[![](https://avatars.githubusercontent.com/u/31956487?u=4693ce4d533d97386b62851f6790881306cb88bc&v=4)](https://github.com/lujingxuansc)[@lujingxuansc](https://github.com/lujingxuansc)\n\n[![](https://avatars.githubusercontent.com/u/15329913?u=d6a01e3a63eb3ef04e5917f994fc2f809f28dd13&v=4)](https://github.com/mplachter)[@mplachter](https://github.com/mplachter)\n\n[![](https://avatars.githubusercontent.com/u/46458320?u=f752991f6c37b213ad11fdae5bf7820aa59b93d0&v=4)](https://github.com/jvelezmagic)[@jvelezmagic](https://github.com/jvelezmagic)\n\n[![](https://avatars.githubusercontent.com/u/50772274?u=5d63cb1b53e5702ea3dd12f865c3b9b252f37a02&v=4)](https://github.com/patrickloeber)[@patrickloeber](https://github.com/patrickloeber)\n\n[![](https://avatars.githubusercontent.com/u/16231195?u=cb98dd7c537280ed31b53108f31286bd50989aea&v=4)](https://github.com/trancethehuman)[@trancethehuman](https://github.com/trancethehuman)\n\n[![](https://avatars.githubusercontent.com/u/68764?v=4)](https://github.com/vadimgu)[@vadimgu](https://github.com/vadimgu)\n\n[![](https://avatars.githubusercontent.com/u/146365078?v=4)](https://github.com/hulitaitai)[@hulitaitai](https://github.com/hulitaitai)\n\n[![](https://avatars.githubusercontent.com/u/6885889?u=0b15031859ad908eb11af83878000ab09bed5609&v=4)](https://github.com/cjcjameson)[@cjcjameson](https://github.com/cjcjameson)\n\n[![](https://avatars.githubusercontent.com/u/69208727?u=132c8ca18143866b79253a6fcbc10f58984f61ab&v=4)](https://github.com/aymeric-roucher)[@aymeric-roucher](https://github.com/aymeric-roucher)\n\n[![](https://avatars.githubusercontent.com/u/24295927?u=27eee7ea85bd7dfd9e918245b96de8c757f5a620&v=4)](https://github.com/Sandy247)[@Sandy247](https://github.com/Sandy247)\n\n[![](https://avatars.githubusercontent.com/u/3887295?u=55c8b3263df68b67f9b465c1758c78898f8b163b&v=4)](https://github.com/zoltan-fedor)[@zoltan-fedor](https://github.com/zoltan-fedor)\n\n[![](https://avatars.githubusercontent.com/u/160584887?v=4)](https://github.com/miri-bar)[@miri-bar](https://github.com/miri-bar)\n\n[![](https://avatars.githubusercontent.com/u/19657350?u=9847c9919a636e9d7022803e829ffd80008cb2d3&v=4)](https://github.com/berkedilekoglu)[@berkedilekoglu](https://github.com/berkedilekoglu)\n\n[![](https://avatars.githubusercontent.com/u/55082429?v=4)](https://github.com/
maang-h)[@maang-h](https://github.com/maang-h)\n\n[![](https://avatars.githubusercontent.com/u/141281053?u=e3ff32e9ae51ff0cca84b482fc1e6c80c28ab0c6&v=4)](https://github.com/rodrigo-clickup)[@rodrigo-clickup](https://github.com/rodrigo-clickup)\n\n[![](https://avatars.githubusercontent.com/u/35718120?u=af59f3ac14a23d1f2e09942415ac07c10f3a3d05&v=4)](https://github.com/numb3r3)[@numb3r3](https://github.com/numb3r3)\n\n[![](https://avatars.githubusercontent.com/u/42609308?u=3f7f530d338e33205815639ad3dfe7c244455728&v=4)](https://github.com/svdeepak99)[@svdeepak99](https://github.com/svdeepak99)\n\n[![](https://avatars.githubusercontent.com/u/97558871?v=4)](https://github.com/ZyeG)[@ZyeG](https://github.com/ZyeG)\n\n[![](https://avatars.githubusercontent.com/u/30483654?u=95e2c59c64c99e4ba77cffb8b2c180f7b44c6a74&v=4)](https://github.com/NoahStapp)[@NoahStapp](https://github.com/NoahStapp)\n\n[![](https://avatars.githubusercontent.com/u/709022?v=4)](https://github.com/tconkling)[@tconkling](https://github.com/tconkling)\n\n[![](https://avatars.githubusercontent.com/u/986859?u=54d240cfd5355bb0cfdaf4ac0a9589963ae9ccab&v=4)](https://github.com/toshish)[@toshish](https://github.com/toshish)\n\n[![](https://avatars.githubusercontent.com/u/1087039?u=4439c00ef507bef0a99d82cdec33d6d0ed53d67c&v=4)](https://github.com/dremeika)[@dremeika](https://github.com/dremeika)\n\n[![](https://avatars.githubusercontent.com/u/49049296?u=26427e6e1aa0a8ac20cc10594664b59a017f5287&v=4)](https://github.com/mingkang111)[@mingkang111](https://github.com/mingkang111)\n\n[![](https://avatars.githubusercontent.com/u/13622183?u=c23256501191447d645cc03c1f6bc83282ef1498&v=4)](https://github.com/liaokongVFX)[@liaokongVFX](https://github.com/liaokongVFX)\n\n[![](https://avatars.githubusercontent.com/u/36044389?u=e669016609aeb3e08e4f2a50f4faa163d633c073&v=4)](https://github.com/0xRaduan)[@0xRaduan](https://github.com/0xRaduan)\n\n[![](https://avatars.githubusercontent.com/u/127370261?v=4)](https://github.com/apeng-singlestore)[@apeng-singlestore](https://github.com/apeng-singlestore)\n\n[![](https://avatars.githubusercontent.com/u/252377?v=4)](https://github.com/jeffkit)[@jeffkit](https://github.com/jeffkit)\n\n[![](https://avatars.githubusercontent.com/u/158216624?v=4)](https://github.com/xsai9101)[@xsai9101](https://github.com/xsai9101)\n\n[![](https://avatars.githubusercontent.com/u/38943595?v=4)](https://github.com/issam9)[@issam9](https://github.com/issam9)\n\n[![](https://avatars.githubusercontent.com/u/56953648?v=4)](https://github.com/Dobiichi-Origami)[@Dobiichi-Origami](https://github.com/Dobiichi-Origami)\n\n[![](https://avatars.githubusercontent.com/u/131272471?v=4)](https://github.com/CogniJT)[@CogniJT](https://github.com/CogniJT)\n\n[![](https://avatars.githubusercontent.com/u/87355704?u=e98091da04c6bfe9af8d982938556832f03fb1fb&v=4)](https://github.com/ivyas21)[@ivyas21](https://github.com/ivyas21)\n\n[![](https://avatars.githubusercontent.com/u/90619575?u=a99d480b1238cfdb2dabcd2fe60d1110518049d9&v=4)](https://github.com/florian-morel22)[@florian-morel22](https://github.com/florian-morel22)\n\n[![](https://avatars.githubusercontent.com/u/22898443?u=4e6aceb9132747788c4b6aca6c16027ee1109b01&v=4)](https://github.com/sdan)[@sdan](https://github.com/sdan)\n\n[![](https://avatars.githubusercontent.com/u/16283396?v=4)](https://github.com/samching)[@samching](https://github.com/samching)\n\n[![](https://avatars.githubusercontent.com/u/306671?u=27f910f1bdcdf18622fcccc138274be885cf1058&v=4)](https://github.com/lukestanley)[@lukestan
ley](https://github.com/lukestanley)\n\n[![](https://avatars.githubusercontent.com/u/63134180?v=4)](https://github.com/IlyaKIS1)[@IlyaKIS1](https://github.com/IlyaKIS1)\n\n[![](https://avatars.githubusercontent.com/u/4432788?u=6883ca123ef6ea5c06b6353183e4f92574b4e152&v=4)](https://github.com/dosuken123)[@dosuken123](https://github.com/dosuken123)\n\n[![](https://avatars.githubusercontent.com/u/356014?u=51c0f2becf914c1cb7fce2d2f184a9d0ae89eae7&v=4)](https://github.com/wietsevenema)[@wietsevenema](https://github.com/wietsevenema)\n\n[![](https://avatars.githubusercontent.com/u/157405112?u=f34aa80161ad2eab0db9255661f4bd7d685cbd0c&v=4)](https://github.com/gustavo-yt)[@gustavo-yt](https://github.com/gustavo-yt)\n\n[![](https://avatars.githubusercontent.com/u/93204286?u=4b965586800fef342c6235fec47e9185b8ec1f81&v=4)](https://github.com/jonathanalgar)[@jonathanalgar](https://github.com/jonathanalgar)\n\n[![](https://avatars.githubusercontent.com/u/28803103?u=c0b795ec14b5536f0e757faf1eca1c1900d1ef3c&v=4)](https://github.com/vsxd)[@vsxd](https://github.com/vsxd)\n\n[![](https://avatars.githubusercontent.com/u/17221195?u=6182ec534d25d1c9ffe1667bd78ea28fd0eea4c8&v=4)](https://github.com/var77)[@var77](https://github.com/var77)\n\n[![](https://avatars.githubusercontent.com/u/54343137?u=0b69859aa8f8e5145d6fda66985a5c8a82c77524&v=4)](https://github.com/L-cloud)[@L-cloud](https://github.com/L-cloud)\n\n[![](https://avatars.githubusercontent.com/u/88005863?v=4)](https://github.com/matiasjacob25)[@matiasjacob25](https://github.com/matiasjacob25)\n\n[![](https://avatars.githubusercontent.com/u/1222232?v=4)](https://github.com/IlyaMichlin)[@IlyaMichlin](https://github.com/IlyaMichlin)\n\n[![](https://avatars.githubusercontent.com/u/6346981?u=8ae43f7d588ffcc184df5948d2d034cc29dc1d7d&v=4)](https://github.com/dzmitry-kankalovich)[@dzmitry-kankalovich](https://github.com/dzmitry-kankalovich)\n\n[![](https://avatars.githubusercontent.com/u/13366849?u=9f66646c23def822aac7d3dfecb49369bc8cdf7b&v=4)](https://github.com/EniasCailliau)[@EniasCailliau](https://github.com/EniasCailliau)\n\n[![](https://avatars.githubusercontent.com/u/68635?u=0ebec81cc881b2428e2c45e549a1081e5fe3cddf&v=4)](https://github.com/kreneskyp)[@kreneskyp](https://github.com/kreneskyp)\n\n[![](https://avatars.githubusercontent.com/u/4441850?u=532666e949309d38a33cda7b1e8b5f30fee0ef7c&v=4)](https://github.com/rsharath)[@rsharath](https://github.com/rsharath)\n\n[![](https://avatars.githubusercontent.com/u/21039333?u=bba2c2d18d3a5ef41360778a7679662565f326d2&v=4)](https://github.com/izapolsk)[@izapolsk](https://github.com/izapolsk)\n\n[![](https://avatars.githubusercontent.com/u/30639818?v=4)](https://github.com/rjadr)[@rjadr](https://github.com/rjadr)\n\n[![](https://avatars.githubusercontent.com/u/17973367?u=135d566bd1e620e230b94bf5252acea571ba510f&v=4)](https://github.com/Lord-Haji)[@Lord-Haji](https://github.com/Lord-Haji)\n\n[![](https://avatars.githubusercontent.com/u/85796?u=d66bb48107582804e6665cd33540cce5dea2fd8b&v=4)](https://github.com/woodworker)[@woodworker](https://github.com/woodworker)\n\n[![](https://avatars.githubusercontent.com/u/32632186?u=3e1b1b0d8cc37c998508e3ab83dc20ef1e2f57e0&v=4)](https://github.com/philschmid)[@philschmid](https://github.com/philschmid)\n\n[![](https://avatars.githubusercontent.com/u/13198452?v=4)](https://github.com/ChrKahl)[@ChrKahl](https://github.com/ChrKahl)\n\n[![](https://avatars.githubusercontent.com/u/8433665?u=1c39439298436f2acaa30c21863e02d3ba13af02&v=4)](https://github.com/bongsang)[@bongsang](https:
//github.com/bongsang)\n\n[![](https://avatars.githubusercontent.com/u/49571870?v=4)](https://github.com/clwillhuang)[@clwillhuang](https://github.com/clwillhuang)\n\n[![](https://avatars.githubusercontent.com/u/3122709?u=55c1160c7f870bcc582d2e0be42d5b1054262e04&v=4)](https://github.com/BidhanRoy)[@BidhanRoy](https://github.com/BidhanRoy)\n\n[![](https://avatars.githubusercontent.com/u/108248080?v=4)](https://github.com/proximal-phalanx)[@proximal-phalanx](https://github.com/proximal-phalanx)\n\n[![](https://avatars.githubusercontent.com/u/26385522?v=4)](https://github.com/hiigao)[@hiigao](https://github.com/hiigao)\n\n[![](https://avatars.githubusercontent.com/u/152659506?v=4)](https://github.com/samkhano1)[@samkhano1](https://github.com/samkhano1)\n\n[![](https://avatars.githubusercontent.com/u/45119610?u=27b4bbe257e0cc055c70f05dc6f45e95d5b09d08&v=4)](https://github.com/ireneisdoomed)[@ireneisdoomed](https://github.com/ireneisdoomed)\n\n[![](https://avatars.githubusercontent.com/u/12946725?u=42a21426742352cfbc210619eed7e76bc1bb5b22&v=4)](https://github.com/mahaddad)[@mahaddad](https://github.com/mahaddad)\n\n[![](https://avatars.githubusercontent.com/u/8368470?u=1b7aebda11db89d56b90ff89f9b108e3cd8bffe5&v=4)](https://github.com/thehapyone)[@thehapyone](https://github.com/thehapyone)\n\n[![](https://avatars.githubusercontent.com/u/18024571?u=c0e12c9590b7e0838b4ab96544bc875e08db0729&v=4)](https://github.com/tomhamer)[@tomhamer](https://github.com/tomhamer)\n\n[![](https://avatars.githubusercontent.com/u/1282617?u=940c2e3a241c82af68edc6adf81bc5da0fef0bbe&v=4)](https://github.com/haoch)[@haoch](https://github.com/haoch)\n\n[![](https://avatars.githubusercontent.com/u/32279503?u=b760deecdb05c098c0e4e19944b72bc22c6487dc&v=4)](https://github.com/SlapDrone)[@SlapDrone](https://github.com/SlapDrone)\n\n[![](https://avatars.githubusercontent.com/u/4302268?u=69a5af6602ab4faa803dcf60b2c50ed33cf44d89&v=4)](https://github.com/taranjeet)[@taranjeet](https://github.com/taranjeet)\n\n[![](https://avatars.githubusercontent.com/u/7312176?u=d986a46c4971c5d15feea254801efc5deb0bc358&v=4)](https://github.com/Pixeladed)[@Pixeladed](https://github.com/Pixeladed)\n\n[![](https://avatars.githubusercontent.com/u/8475708?v=4)](https://github.com/mlot)[@mlot](https://github.com/mlot)\n\n[![](https://avatars.githubusercontent.com/u/7282984?u=5e843c8eca6ff699d7a9e8b73f63b3f6dadcce04&v=4)](https://github.com/JGalego)[@JGalego](https://github.com/JGalego)\n\n[![](https://avatars.githubusercontent.com/u/21073184?u=deed6fe562ed425be66c210398811b664b5039a2&v=4)](https://github.com/xieqihui)[@xieqihui](https://github.com/xieqihui)\n\n[![](https://avatars.githubusercontent.com/u/9324867?v=4)](https://github.com/mhavey)[@mhavey](https://github.com/mhavey)\n\n[![](https://avatars.githubusercontent.com/u/4526224?u=3a47513ee686870ddcbecaa70756e3e8224732af&v=4)](https://github.com/praveenv)[@praveenv](https://github.com/praveenv)\n\n[![](https://avatars.githubusercontent.com/u/1734012?u=105d7344bcd5c0dee1a293d2740cefa05cc46b9b&v=4)](https://github.com/srics)[@srics](https://github.com/srics)\n\n[![](https://avatars.githubusercontent.com/u/31218485?u=6ce575b365c0353b5b3d1ea03088f8da36764100&v=4)](https://github.com/16BitNarwhal)[@16BitNarwhal](https://github.com/16BitNarwhal)\n\n[![](https://avatars.githubusercontent.com/u/12967560?v=4)](https://github.com/zhangch9)[@zhangch9](https://github.com/zhangch9)\n\n[![](https://avatars.githubusercontent.com/u/37284051?u=6a4bc9b65700fc4835aebec6bf6aab77acdaa233&v=4)](https://github.com/paulo
nasc)[@paulonasc](https://github.com/paulonasc)\n\n[![](https://avatars.githubusercontent.com/u/2008740?u=4c8824a259e14e56c2d3501e32a3422b258704c5&v=4)](https://github.com/rubell)[@rubell](https://github.com/rubell)\n\n[![](https://avatars.githubusercontent.com/u/37992436?u=21693d9e841c3b7f9f091a210fbeee7e415a0751&v=4)](https://github.com/izzymsft)[@izzymsft](https://github.com/izzymsft)\n\n[![](https://avatars.githubusercontent.com/u/22676399?u=6b46c5acfe16b722badbfa6845516c1627171bbe&v=4)](https://github.com/richarda23)[@richarda23](https://github.com/richarda23)\n\n[![](https://avatars.githubusercontent.com/u/7711036?v=4)](https://github.com/zifeiq)[@zifeiq](https://github.com/zifeiq)\n\n[![](https://avatars.githubusercontent.com/u/56812134?v=4)](https://github.com/liuyonghengheng)[@liuyonghengheng](https://github.com/liuyonghengheng)\n\n[![](https://avatars.githubusercontent.com/u/18428646?u=d26db3c0411bd1d62c1dca99e5c86dd1f7a3b53d&v=4)](https://github.com/tomaspiaggio)[@tomaspiaggio](https://github.com/tomaspiaggio)\n\n[![](https://avatars.githubusercontent.com/u/71321890?u=71a53f3a743fb8a91733e2a4cfcc05e309e3ef87&v=4)](https://github.com/klaus-xiong)[@klaus-xiong](https://github.com/klaus-xiong)\n\n[![](https://avatars.githubusercontent.com/u/16155041?u=bf86e1dd4aaeccde8ccf12bf8c16c494644b84e1&v=4)](https://github.com/alallema)[@alallema](https://github.com/alallema)\n\n[![](https://avatars.githubusercontent.com/u/8777479?v=4)](https://github.com/fengjial)[@fengjial](https://github.com/fengjial)\n\n[![](https://avatars.githubusercontent.com/u/18065113?u=6ea1812de26ecb108c18e50b719a109049d93ce2&v=4)](https://github.com/simon824)[@simon824](https://github.com/simon824)\n\n[![](https://avatars.githubusercontent.com/u/28787976?u=07c76df6dce5d38c056fb0783128844e6c70f4c4&v=4)](https://github.com/AksAman)[@AksAman](https://github.com/AksAman)\n\n[![](https://avatars.githubusercontent.com/u/14037726?u=e91cfcdb7606db58b059893368f3cf70a2340f5f&v=4)](https://github.com/mewim)[@mewim](https://github.com/mewim)\n\n[![](https://avatars.githubusercontent.com/u/4874?v=4)](https://github.com/ruanwz)[@ruanwz](https://github.com/ruanwz)\n\n[![](https://avatars.githubusercontent.com/u/1921353?v=4)](https://github.com/gdedrouas)[@gdedrouas](https://github.com/gdedrouas)\n\n[![](https://avatars.githubusercontent.com/u/1917451?u=f0d78c43c1f2d4bed080f9a8c46905d3c22a28c7&v=4)](https://github.com/mariokostelac)[@mariokostelac](https://github.com/mariokostelac)\n\n[![](https://avatars.githubusercontent.com/u/6432132?v=4)](https://github.com/samnoyes)[@samnoyes](https://github.com/samnoyes)\n\n[![](https://avatars.githubusercontent.com/u/22236370?u=289c19bfc89a43a7e0c6956f73305aab3a8bd978&v=4)](https://github.com/mosheber)[@mosheber](https://github.com/mosheber)\n\n[![](https://avatars.githubusercontent.com/u/8844262?u=1f09d2fe41756368730c3684fc819fbad940b4ac&v=4)](https://github.com/laplaceon)[@laplaceon](https://github.com/laplaceon)\n\n[![](https://avatars.githubusercontent.com/u/11781950?u=a34a78ac4d9dcc25fd084f423566c9443c2cc47d&v=4)](https://github.com/thepycoder)[@thepycoder](https://github.com/thepycoder)\n\n[![](https://avatars.githubusercontent.com/u/42592581?v=4)](https://github.com/toddkim95)[@toddkim95](https://github.com/toddkim95)\n\n[![](https://avatars.githubusercontent.com/u/950938?u=5283ce0f42f555abe0cd3eb9e45d23206c2ba6b8&v=4)](https://github.com/agamble)[@agamble](https://github.com/agamble)\n\n[![](https://avatars.githubusercontent.com/u/13607221?u=dcea34602eda8e96ea684d231bd5b597ba0c1a4f&v
=4)](https://github.com/KastanDay)[@KastanDay](https://github.com/KastanDay)\n\n[![](https://avatars.githubusercontent.com/u/931697?u=4ce45d183c52828da0b4f0ca298d67ad970d43f6&v=4)](https://github.com/seanaedmiston)[@seanaedmiston](https://github.com/seanaedmiston)\n\n[![](https://avatars.githubusercontent.com/u/3028543?u=5096311a70425e82c9b1a143d29ccd502c155a7f&v=4)](https://github.com/Randl)[@Randl](https://github.com/Randl)\n\n[![](https://avatars.githubusercontent.com/u/115017354?v=4)](https://github.com/NikolaosPapailiou)[@NikolaosPapailiou](https://github.com/NikolaosPapailiou)\n\n[![](https://avatars.githubusercontent.com/u/460966?v=4)](https://github.com/ebrehault)[@ebrehault](https://github.com/ebrehault)\n\n[![](https://avatars.githubusercontent.com/u/32112894?u=d317c16ef9614adbeb3cf18ac39239c585db2264&v=4)](https://github.com/santiagxf)[@santiagxf](https://github.com/santiagxf)\n\n[![](https://avatars.githubusercontent.com/u/30162978?v=4)](https://github.com/thehappydinoa)[@thehappydinoa](https://github.com/thehappydinoa)\n\n[![](https://avatars.githubusercontent.com/u/30344258?u=51c169c8996024b68e9b3ec0bfe93465940dc8b4&v=4)](https://github.com/LMC117)[@LMC117](https://github.com/LMC117)\n\n[![](https://avatars.githubusercontent.com/u/131612909?v=4)](https://github.com/WilliamEspegren)[@WilliamEspegren](https://github.com/WilliamEspegren)\n\n[![](https://avatars.githubusercontent.com/u/7380988?u=ba9beadb7fd3bcd6d8439154bedbd32d5fdbd4d8&v=4)](https://github.com/sunbc0120)[@sunbc0120](https://github.com/sunbc0120)\n\n[![](https://avatars.githubusercontent.com/u/18614423?u=1d3dba8e4e87d2a449cc90c204f422327af2d09d&v=4)](https://github.com/Simon-Stone)[@Simon-Stone](https://github.com/Simon-Stone)\n\n[![](https://avatars.githubusercontent.com/u/15304273?u=7588e8d8f8a889950b0afd00c2457ec3126ce8f6&v=4)](https://github.com/Amyh102)[@Amyh102](https://github.com/Amyh102)\n\n[![](https://avatars.githubusercontent.com/u/67831673?v=4)](https://github.com/shumway743)[@shumway743](https://github.com/shumway743)\n\n[![](https://avatars.githubusercontent.com/u/12097018?u=ef0ff38c5959d7e7acf2c87e8e8051ca2d047c76&v=4)](https://github.com/gcheron)[@gcheron](https://github.com/gcheron)\n\n[![](https://avatars.githubusercontent.com/u/7102288?u=52db4849a0136c1d78cbc5a5de99ee0073384300&v=4)](https://github.com/zachdj)[@zachdj](https://github.com/zachdj)\n\n[![](https://avatars.githubusercontent.com/u/6980212?u=89202482380b379837fd7318dde75a00e83d2459&v=4)](https://github.com/ehsanmok)[@ehsanmok](https://github.com/ehsanmok)\n\n[![](https://avatars.githubusercontent.com/u/16619882?u=ed851c7ccfa20588d3cd5ca47e79d94c3e4b6427&v=4)](https://github.com/Trevato)[@Trevato](https://github.com/Trevato)\n\n[![](https://avatars.githubusercontent.com/u/13738772?u=1685c6916759c2ec986434af557343f6b29bce32&v=4)](https://github.com/raoufchebri)[@raoufchebri](https://github.com/raoufchebri)\n\n[![](https://avatars.githubusercontent.com/u/492616?u=c2ecf6dac54322df081577f6b8e1ca390535c4a6&v=4)](https://github.com/delgermurun)[@delgermurun](https://github.com/delgermurun)\n\n[![](https://avatars.githubusercontent.com/u/9665243?u=e403da70029d61dbbb9a2f0e03daebc5418974ed&v=4)](https://github.com/jcjc712)[@jcjc712](https://github.com/jcjc712)\n\n[![](https://avatars.githubusercontent.com/u/9089568?u=d2f8bc466003afc3558a96f3266a0e32d5c18c34&v=4)](https://github.com/EvilFreelancer)[@EvilFreelancer](https://github.com/EvilFreelancer)\n\n[![](https://avatars.githubusercontent.com/u/32046231?u=db454b8e6da48120d78d3397006928cc86f0101
9&v=4)](https://github.com/zywilliamli)[@zywilliamli](https://github.com/zywilliamli)\n\n[![](https://avatars.githubusercontent.com/u/48098520?u=aa4a7287f484eb32d408360ca340c2f5bc8444d0&v=4)](https://github.com/thaiminhpv)[@thaiminhpv](https://github.com/thaiminhpv)\n\n[![](https://avatars.githubusercontent.com/u/8139170?u=a63f55e62ad26febcd94e193c22bfd867d022af2&v=4)](https://github.com/paperMoose)[@paperMoose](https://github.com/paperMoose)\n\n[![](https://avatars.githubusercontent.com/u/71520361?v=4)](https://github.com/younis-bash)[@younis-bash](https://github.com/younis-bash)\n\n[![](https://avatars.githubusercontent.com/u/16340036?v=4)](https://github.com/rajib76)[@rajib76](https://github.com/rajib76)\n\n[![](https://avatars.githubusercontent.com/u/11153261?u=a5af26e0bd60a27ba4aba60d15b129fc410fe8cc&v=4)](https://github.com/ihpolash)[@ihpolash](https://github.com/ihpolash)\n\n[![](https://avatars.githubusercontent.com/u/123224380?v=4)](https://github.com/scadEfUr)[@scadEfUr](https://github.com/scadEfUr)\n\n[![](https://avatars.githubusercontent.com/u/51324450?u=25a4838c93e6237e3b6d6ea1fbd23442cfba5723&v=4)](https://github.com/SauhaardW)[@SauhaardW](https://github.com/SauhaardW)\n\n[![](https://avatars.githubusercontent.com/u/119924780?v=4)](https://github.com/pranava-amzn)[@pranava-amzn](https://github.com/pranava-amzn)\n\n[![](https://avatars.githubusercontent.com/u/16321871?u=9342b5e86b1e6c257e4024bed7e285470f466b8c&v=4)](https://github.com/fynnfluegge)[@fynnfluegge](https://github.com/fynnfluegge)\n\n[![](https://avatars.githubusercontent.com/u/2469198?u=43a8a9e376a5a7db6972e720906fd6f66560d235&v=4)](https://github.com/adilansari)[@adilansari](https://github.com/adilansari)\n\n[![](https://avatars.githubusercontent.com/u/13305222?u=6d00fe3cfd2414a9e309540fe49f532fc0e503dd&v=4)](https://github.com/bstadt)[@bstadt](https://github.com/bstadt)\n\n[![](https://avatars.githubusercontent.com/in/29110?v=4)](https://github.com/apps/dependabot)[@dependabot](https://github.com/apps/dependabot)\n\n[![](https://avatars.githubusercontent.com/u/42089598?v=4)](https://github.com/PenghuiCheng)[@PenghuiCheng](https://github.com/PenghuiCheng)\n\n[![](https://avatars.githubusercontent.com/u/145396613?u=f0da33ee8d74a5353a43f8df3332c9cac2bd70f8&v=4)](https://github.com/giannis2two)[@giannis2two](https://github.com/giannis2two)\n\n[![](https://avatars.githubusercontent.com/u/107621925?u=4a7b06f4c0cac2534521698383f58331c00c093f&v=4)](https://github.com/anilaltuner)[@anilaltuner](https://github.com/anilaltuner)\n\n[![](https://avatars.githubusercontent.com/u/144132509?u=42f5528898e3f4e3790bf432b8ca662dc347c778&v=4)](https://github.com/bu2kx)[@bu2kx](https://github.com/bu2kx)\n\n[![](https://avatars.githubusercontent.com/u/32715913?u=5de749a141259c3fdd8a16c6438aff2b7823fd69&v=4)](https://github.com/AmineDjeghri)[@AmineDjeghri](https://github.com/AmineDjeghri)\n\n[![](https://avatars.githubusercontent.com/u/1918816?v=4)](https://github.com/bakebrain)[@bakebrain](https://github.com/bakebrain)\n\n[![](https://avatars.githubusercontent.com/u/5349024?u=4875b6589899edb51cb083d209bd9fbfac58da18&v=4)](https://github.com/bburgin)[@bburgin](https://github.com/bburgin)\n\n[![](https://avatars.githubusercontent.com/u/2806769?u=2969d39e1099584bc34b9e91a718f97107b38cbc&v=4)](https://github.com/sreiswig)[@sreiswig](https://github.com/sreiswig)\n\n[![](https://avatars.githubusercontent.com/u/134934501?u=167199ff0bff447057fc5e291be0225ad5260111&v=4)](https://github.com/vrushankportkey)[@vrushankportkey](https://github.com/v
rushankportkey)\n\n[![](https://avatars.githubusercontent.com/u/4852235?u=69b6d23a20085d57e304196e304cfd06f3393f3d&v=4)](https://github.com/jxnl)[@jxnl](https://github.com/jxnl)\n\n[![](https://avatars.githubusercontent.com/u/8412519?u=391d663c51163f604c14bc625f4d6c11042a0c36&v=4)](https://github.com/arron2003)[@arron2003](https://github.com/arron2003)\n\n[![](https://avatars.githubusercontent.com/u/17466553?u=2510816fc74e11bb543f54f97afe1c78e9bda720&v=4)](https://github.com/HashemAlsaket)[@HashemAlsaket](https://github.com/HashemAlsaket)\n\n[![](https://avatars.githubusercontent.com/u/1555858?v=4)](https://github.com/prakul)[@prakul](https://github.com/prakul)\n\n[![](https://avatars.githubusercontent.com/u/20924562?u=3f61dc32f82124727d7157c0977240770ab82c02&v=4)](https://github.com/ea-open-source)[@ea-open-source](https://github.com/ea-open-source)\n\n[![](https://avatars.githubusercontent.com/u/1473079?v=4)](https://github.com/constantinmusca)[@constantinmusca](https://github.com/constantinmusca)\n\n[![](https://avatars.githubusercontent.com/u/74497693?u=0d49e69abc1f1c5299d479d943285fcac7eee1ae&v=4)](https://github.com/Subsegment)[@Subsegment](https://github.com/Subsegment)\n\n[![](https://avatars.githubusercontent.com/u/15026857?u=a5129b6393cb746e25fca20655458d248ec4f05d&v=4)](https://github.com/zrcni)[@zrcni](https://github.com/zrcni)\n\n[![](https://avatars.githubusercontent.com/u/191493?u=3e803364d95e760cafa108ab29ee109ba0e0af83&v=4)](https://github.com/piizei)[@piizei](https://github.com/piizei)\n\n[![](https://avatars.githubusercontent.com/u/58871401?u=81f900fd6c286d9e8c5c8673f68b88387ed491e5&v=4)](https://github.com/RohanDey02)[@RohanDey02](https://github.com/RohanDey02)\n\n[![](https://avatars.githubusercontent.com/u/57868915?v=4)](https://github.com/SuperJokerayo)[@SuperJokerayo](https://github.com/SuperJokerayo)\n\n[![](https://avatars.githubusercontent.com/u/14224983?u=2a696ae181971f12ace4f252b759e1ca75ccdb44&v=4)](https://github.com/demjened)[@demjened](https://github.com/demjened)\n\n[![](https://avatars.githubusercontent.com/u/3285355?u=8f91986cb97c2efcd84d62e339d8be43562de13d&v=4)](https://github.com/killinsun)[@killinsun](https://github.com/killinsun)\n\n[![](https://avatars.githubusercontent.com/u/291370?u=5802ab31e0feb7ae15465dedaa48ba646f0a4127&v=4)](https://github.com/sanzgiri)[@sanzgiri](https://github.com/sanzgiri)\n\n[![](https://avatars.githubusercontent.com/u/1621509?u=e54d671ddef5ac7580003427246fc2247964c9ed&v=4)](https://github.com/MacanPN)[@MacanPN](https://github.com/MacanPN)\n\n[![](https://avatars.githubusercontent.com/u/6872942?v=4)](https://github.com/wlleiiwang)[@wlleiiwang](https://github.com/wlleiiwang)\n\n[![](https://avatars.githubusercontent.com/u/20760062?u=422c372863e9c42406db2241e41cc52c522431ef&v=4)](https://github.com/abdalrohman)[@abdalrohman](https://github.com/abdalrohman)\n\n[![](https://avatars.githubusercontent.com/u/3118964?u=471d785af68097fa9edeaa7bcd130b56ddda6338&v=4)](https://github.com/coyotespike)[@coyotespike](https://github.com/coyotespike)\n\n[![](https://avatars.githubusercontent.com/u/1039756?u=1e32f3165c823547362784b17f65f7690b56e0b0&v=4)](https://github.com/zchenyu)[@zchenyu](https://github.com/zchenyu)\n\n[![](https://avatars.githubusercontent.com/u/83261447?v=4)](https://github.com/yuwenzho)[@yuwenzho](https://github.com/yuwenzho)\n\n[![](https://avatars.githubusercontent.com/u/132831962?u=d91bc0c46bc4c4df36d752076418530eea55a5dc&v=4)](https://github.com/ricki-epsilla)[@ricki-epsilla](https://github.com/ricki-epsilla)\n\n[
![](https://avatars.githubusercontent.com/u/2914618?v=4)](https://github.com/HassanOuda)[@HassanOuda](https://github.com/HassanOuda)\n\n[![](https://avatars.githubusercontent.com/u/2215597?u=d5558c7d5c1ab6d4a8e5381826abd1f00371a5be&v=4)](https://github.com/s-udhaya)[@s-udhaya](https://github.com/s-udhaya)\n\n[![](https://avatars.githubusercontent.com/u/5522060?v=4)](https://github.com/tesfagabir)[@tesfagabir](https://github.com/tesfagabir)\n\n[![](https://avatars.githubusercontent.com/u/56334152?v=4)](https://github.com/chocolate4)[@chocolate4](https://github.com/chocolate4)\n\n[![](https://avatars.githubusercontent.com/u/13938372?u=0e3f80aa515c41b7d9084b73d761cad378ebdc7a&v=4)](https://github.com/jasondotparse)[@jasondotparse](https://github.com/jasondotparse)\n\n[![](https://avatars.githubusercontent.com/u/12449236?u=f13eba9cfa9baf8fa9a0fce667eb2fe429ecd298&v=4)](https://github.com/bwmatson)[@bwmatson](https://github.com/bwmatson)\n\n[![](https://avatars.githubusercontent.com/u/38718601?u=44687611a0b7bd160ee129d04d4220d98f32ebab&v=4)](https://github.com/Daggx)[@Daggx](https://github.com/Daggx)\n\n[![](https://avatars.githubusercontent.com/u/78627776?u=7fd9922950b898ab502666f2cea155cf0200fe5f&v=4)](https://github.com/isahers1)[@isahers1](https://github.com/isahers1)\n\n[![](https://avatars.githubusercontent.com/u/848849?v=4)](https://github.com/seth-hg)[@seth-hg](https://github.com/seth-hg)\n\n[![](https://avatars.githubusercontent.com/u/34580718?u=cf4ff62610ff72ad9580d328e38f32e306d6150f&v=4)](https://github.com/NolanTrem)[@NolanTrem](https://github.com/NolanTrem)\n\n[![](https://avatars.githubusercontent.com/u/9007876?v=4)](https://github.com/mpb159753)[@mpb159753](https://github.com/mpb159753)\n\n[![](https://avatars.githubusercontent.com/u/800430?v=4)](https://github.com/mikeknoop)[@mikeknoop](https://github.com/mikeknoop)\n\n[![](https://avatars.githubusercontent.com/u/57349093?v=4)](https://github.com/datelier)[@datelier](https://github.com/datelier)\n\n[![](https://avatars.githubusercontent.com/u/13024750?u=6ae631199ec7c0bb34eb8d56200023cdd94720d3&v=4)](https://github.com/JamsheedMistri)[@JamsheedMistri](https://github.com/JamsheedMistri)\n\n[![](https://avatars.githubusercontent.com/u/42374034?u=cfb14ff1a7c4f0a500cd9c282bc3fbcba170daef&v=4)](https://github.com/atherfawaz)[@atherfawaz](https://github.com/atherfawaz)\n\n[![](https://avatars.githubusercontent.com/u/6012338?u=198f10817236beac03b10bb8f5cc6d7fcb133cc7&v=4)](https://github.com/Hugoberry)[@Hugoberry](https://github.com/Hugoberry)\n\n[![](https://avatars.githubusercontent.com/u/54216004?u=6a387166a0e8599c4f3ff35f61c12458df539f96&v=4)](https://github.com/Haris-Ali007)[@Haris-Ali007](https://github.com/Haris-Ali007)\n\n[![](https://avatars.githubusercontent.com/u/52078762?v=4)](https://github.com/AlpinDale)[@AlpinDale](https://github.com/AlpinDale)\n\n[![](https://avatars.githubusercontent.com/u/70274018?u=b6d5fd627cd26f590ed442d4dffa5bdddcb803cc&v=4)](https://github.com/jjovalle99)[@jjovalle99](https://github.com/jjovalle99)\n\n[![](https://avatars.githubusercontent.com/u/7529846?u=bd1b12fa55583ac7f01c4440cad87163a0fe3c19&v=4)](https://github.com/DN6)[@DN6](https://github.com/DN6)\n\n[![](https://avatars.githubusercontent.com/u/83648453?u=8557d590ff3516d093da32689816e898a08245ce&v=4)](https://github.com/spike-spiegel-21)[@spike-spiegel-21](https://github.com/spike-spiegel-21)\n\n[![](https://avatars.githubusercontent.com/u/91102080?u=c87d3f88e6b05445a121c204a0d39a0b9ec17e05&v=4)](https://github.com/mziru)[@mziru](https://git
hub.com/mziru)\n\n[![](https://avatars.githubusercontent.com/u/56706206?v=4)](https://github.com/Dylan20XX)[@Dylan20XX](https://github.com/Dylan20XX)\n\n[![](https://avatars.githubusercontent.com/u/8936233?u=07eb2625319cd0fd18df747fcdeef42cd9fc981d&v=4)](https://github.com/xingfanxia)[@xingfanxia](https://github.com/xingfanxia)\n\n[![](https://avatars.githubusercontent.com/u/74933942?u=a952add7652d59815f24581d83f504216780521b&v=4)](https://github.com/0xJord4n)[@0xJord4n](https://github.com/0xJord4n)\n\n[![](https://avatars.githubusercontent.com/u/29782447?u=a8804de5269d64ef1c2587945e1b40925349c4a0&v=4)](https://github.com/tabbyl21)[@tabbyl21](https://github.com/tabbyl21)\n\n[![](https://avatars.githubusercontent.com/u/38180263?u=d514276e558f3f3aaba4844fdeb14eb84e9c8cc2&v=4)](https://github.com/naman-modi)[@naman-modi](https://github.com/naman-modi)\n\n[![](https://avatars.githubusercontent.com/u/126395124?u=79cff420daf96b72b14caca0061b57b884139f4f&v=4)](https://github.com/sokolgood)[@sokolgood](https://github.com/sokolgood)\n\n[![](https://avatars.githubusercontent.com/u/2310608?u=1e5009aa6681eed766a14cfb8849d820821dddce&v=4)](https://github.com/harelix)[@harelix](https://github.com/harelix)\n\n[![](https://avatars.githubusercontent.com/u/107643?v=4)](https://github.com/standby24x7)[@standby24x7](https://github.com/standby24x7)\n\n[![](https://avatars.githubusercontent.com/u/37549748?v=4)](https://github.com/lts-rad)[@lts-rad](https://github.com/lts-rad)\n\n[![](https://avatars.githubusercontent.com/u/829644?u=56a7fd939b2d15ed21011497db77ad3f569e8a60&v=4)](https://github.com/mengxr)[@mengxr](https://github.com/mengxr)\n\n[![](https://avatars.githubusercontent.com/u/9869689?u=b572050134e1e6a3c0096d2b032a5dec32725222&v=4)](https://github.com/nuric)[@nuric](https://github.com/nuric)\n\n[![](https://avatars.githubusercontent.com/u/16749003?v=4)](https://github.com/akshaya-a)[@akshaya-a](https://github.com/akshaya-a)\n\n[![](https://avatars.githubusercontent.com/u/16641288?u=f659a34367a54ea7ac49bc2a51ac27f4a72c770b&v=4)](https://github.com/edreisMD)[@edreisMD](https://github.com/edreisMD)\n\n[![](https://avatars.githubusercontent.com/u/18373802?u=92b9ba56d4178115777a0a1a7d2bf88c162f3fce&v=4)](https://github.com/ar-mccabe)[@ar-mccabe](https://github.com/ar-mccabe)\n\n[![](https://avatars.githubusercontent.com/u/98005188?u=21b5e30aa6464f46e85aa006cb44b2bd18c89347&v=4)](https://github.com/Navanit-git)[@Navanit-git](https://github.com/Navanit-git)\n\n[![](https://avatars.githubusercontent.com/u/127131037?u=74ffbf6c2a443f51f7e72d00b0a4e9a30b9e1c4c&v=4)](https://github.com/david-huge)[@david-huge](https://github.com/david-huge)\n\n[![](https://avatars.githubusercontent.com/u/91344214?u=5c34c21b464a6bbffd83a07aafac2cf9076856db&v=4)](https://github.com/rotemweiss57)[@rotemweiss57](https://github.com/rotemweiss57)\n\n[![](https://avatars.githubusercontent.com/u/9272497?u=bde02b58aebeb42b77cd6678456e8ead7f50ab66&v=4)](https://github.com/hmilkovi)[@hmilkovi](https://github.com/hmilkovi)\n\n[![](https://avatars.githubusercontent.com/u/42059733?u=502e381ca0e17491298e90ac3c5db019dd484efc&v=4)](https://github.com/vreyespue)[@vreyespue](https://github.com/vreyespue)\n\n[![](https://avatars.githubusercontent.com/u/2792?u=f5d3e57d22f60b27f9c87430dc45bceb49e88215&v=4)](https://github.com/deepblue)[@deepblue](https://github.com/deepblue)\n\n[![](https://avatars.githubusercontent.com/u/6087484?u=45381a549e19872d386ca7a7bf399dd571f2f3e8&v=4)](https://github.com/niklub)[@niklub](https://github.com/niklub)\n\n[![](http
s://avatars.githubusercontent.com/u/1081215?v=4)](https://github.com/dirtysalt)[@dirtysalt](https://github.com/dirtysalt)\n\n[![](https://avatars.githubusercontent.com/u/2138258?u=7de291a1ce0c95d6589496ba8e1d056c054ced00&v=4)](https://github.com/zeiler)[@zeiler](https://github.com/zeiler)\n\n[![](https://avatars.githubusercontent.com/u/16364994?u=d8603567cb87b4f76f0df2f7937252ae040cbebf&v=4)](https://github.com/sachinparyani)[@sachinparyani](https://github.com/sachinparyani)\n\n[![](https://avatars.githubusercontent.com/u/27913091?u=af5f1ab3c8383109dfed085fd2e2aa09599dece8&v=4)](https://github.com/ju-bezdek)[@ju-bezdek](https://github.com/ju-bezdek)\n\n[![](https://avatars.githubusercontent.com/u/108557828?u=1f1cc6b7e04613034c6ee4add7846c5a7333da26&v=4)](https://github.com/ColabDog)[@ColabDog](https://github.com/ColabDog)\n\n[![](https://avatars.githubusercontent.com/u/37485638?u=2552fdd04d05df363fa34b99c3cd3392762bf626&v=4)](https://github.com/hanit-com)[@hanit-com](https://github.com/hanit-com)\n\n[![](https://avatars.githubusercontent.com/u/2748495?v=4)](https://github.com/manmax31)[@manmax31](https://github.com/manmax31)\n\n[![](https://avatars.githubusercontent.com/u/38863?v=4)](https://github.com/imrehg)[@imrehg](https://github.com/imrehg)\n\n[![](https://avatars.githubusercontent.com/u/1454551?u=14928571307ed348c362e902edc913f6d81fea07&v=4)](https://github.com/janchorowski)[@janchorowski](https://github.com/janchorowski)\n\n[![](https://avatars.githubusercontent.com/u/90774897?v=4)](https://github.com/AthulVincent)[@AthulVincent](https://github.com/AthulVincent)\n\n[![](https://avatars.githubusercontent.com/u/23078323?u=7524c4ab19b061e21e62ddd6b48b6084fd6d54c1&v=4)](https://github.com/tamohannes)[@tamohannes](https://github.com/tamohannes)\n\n[![](https://avatars.githubusercontent.com/u/49598618?u=2d8024560f2f936312e819348cc18db338961fb7&v=4)](https://github.com/boazwasserman)[@boazwasserman](https://github.com/boazwasserman)\n\n[![](https://avatars.githubusercontent.com/u/30856?v=4)](https://github.com/dsummersl)[@dsummersl](https://github.com/dsummersl)\n\n[![](https://avatars.githubusercontent.com/u/280981?u=6c969bb88d84ac2c2ea100389504f63ac9155425&v=4)](https://github.com/idvorkin)[@idvorkin](https://github.com/idvorkin)\n\n[![](https://avatars.githubusercontent.com/u/24319338?v=4)](https://github.com/vempaliakhil96)[@vempaliakhil96](https://github.com/vempaliakhil96)\n\n[![](https://avatars.githubusercontent.com/u/18140070?u=1992cdb13c62ee66f4ccc8f000d2c6efae3056c3&v=4)](https://github.com/C-K-Loan)[@C-K-Loan](https://github.com/C-K-Loan)\n\n[![](https://avatars.githubusercontent.com/u/18020640?u=d47ad1cc8fb82340d1c77d1f191038372987f85a&v=4)](https://github.com/daniel-brenot)[@daniel-brenot](https://github.com/daniel-brenot)\n\n[![](https://avatars.githubusercontent.com/u/20795854?u=e0a8116151662cf0126b274f74fd279f34febf93&v=4)](https://github.com/jwbeck97)[@jwbeck97](https://github.com/jwbeck97)\n\nWe're so thankful for your support!\n\nAnd one more thank you to [@tiangolo](https://github.com/tiangolo) for inspiration via FastAPI's [excellent people page](https://fastapi.tiangolo.com/fastapi-people).\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/people.mdx)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on 
* * *

Source: https://python.langchain.com/v0.2/docs/how_to/llm_caching/

How to cache LLM responses
==========================

LangChain provides an optional caching layer for LLMs. This is useful for two reasons:

1. It can save you money by reducing the number of API calls you make to the LLM provider, which matters if you often request the same completion multiple times.
2. It can speed up your application, since repeated requests are served from the cache instead of waiting on the provider.

    from langchain.globals import set_llm_cache
    from langchain_openai import OpenAI

    # To make the caching really obvious, let's use a slower model.
    llm = OpenAI(model_name="gpt-3.5-turbo-instruct", n=2, best_of=2)

**API Reference:** [set_llm_cache](https://api.python.langchain.com/en/latest/globals/langchain.globals.set_llm_cache.html) | [OpenAI](https://api.python.langchain.com/en/latest/llms/langchain_openai.llms.base.OpenAI.html)

    %%time
    from langchain.cache import InMemoryCache

    set_llm_cache(InMemoryCache())

    # The first time, it is not yet in cache, so it should take longer
    llm.predict("Tell me a joke")

**API Reference:** [InMemoryCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.InMemoryCache.html)

    CPU times: user 13.7 ms, sys: 6.54 ms, total: 20.2 ms
    Wall time: 330 ms

    "\n\nWhy couldn't the bicycle stand up by itself? Because it was two-tired!"

    %%time
    # The second time it is, so it goes faster
    llm.predict("Tell me a joke")

    CPU times: user 436 µs, sys: 921 µs, total: 1.36 ms
    Wall time: 1.36 ms

    "\n\nWhy couldn't the bicycle stand up by itself? Because it was two-tired!"
SQLite Cache
------------

    !rm .langchain.db

    # We can do the same thing with a SQLite cache
    from langchain_community.cache import SQLiteCache

    set_llm_cache(SQLiteCache(database_path=".langchain.db"))

**API Reference:** [SQLiteCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.SQLiteCache.html)

    %%time
    # The first time, it is not yet in cache, so it should take longer
    llm.predict("Tell me a joke")

    CPU times: user 29.3 ms, sys: 17.3 ms, total: 46.7 ms
    Wall time: 364 ms

    '\n\nWhy did the tomato turn red?\n\nBecause it saw the salad dressing!'

    %%time
    # The second time it is, so it goes faster
    llm.predict("Tell me a joke")

    CPU times: user 4.58 ms, sys: 2.23 ms, total: 6.8 ms
    Wall time: 4.68 ms

    '\n\nWhy did the tomato turn red?\n\nBecause it saw the salad dressing!'
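The same global cache applies to chat models as well. Below is a minimal sketch (not part of the original guide) that assumes an `OPENAI_API_KEY` is set and reuses the SQLite cache configured above; the second identical call should be answered from the on-disk cache rather than the API:

    from langchain.globals import set_llm_cache
    from langchain_community.cache import SQLiteCache
    from langchain_openai import ChatOpenAI

    # Reuse the same on-disk cache file as above.
    set_llm_cache(SQLiteCache(database_path=".langchain.db"))

    chat = ChatOpenAI(model="gpt-3.5-turbo-0125")

    # First call hits the API and stores the result in .langchain.db ...
    chat.invoke("Tell me a joke")

    # ... the second identical call is served from the cache, so it returns much faster.
    chat.invoke("Tell me a joke")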
* * *

Source: https://python.langchain.com/v0.2/docs/how_to/logprobs/

How to get log probabilities
============================

Prerequisites

This guide assumes familiarity with the following concepts:

* [Chat models](/v0.2/docs/concepts/#chat-models)

Certain chat models can be configured to return token-level log probabilities representing the likelihood of a given token. This guide walks through how to get this information in LangChain.

OpenAI
------

Install the LangChain x OpenAI package and set your API key:

    %pip install -qU langchain-openai

    import getpass
    import os

    os.environ["OPENAI_API_KEY"] = getpass.getpass()

For the OpenAI API to return log probabilities, we need to configure the `logprobs=True` param. The logprobs are then included on each output [`AIMessage`](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) as part of the `response_metadata`:

    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(model="gpt-3.5-turbo-0125").bind(logprobs=True)

    msg = llm.invoke(("human", "how are you today"))

    msg.response_metadata["logprobs"]["content"][:5]

**API Reference:** [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)

    [{'token': 'I', 'bytes': [73], 'logprob': -0.26341408, 'top_logprobs': []},
     {'token': "'m", 'bytes': [39, 109], 'logprob': -0.48584133, 'top_logprobs': []},
     {'token': ' just', 'bytes': [32, 106, 117, 115, 116], 'logprob': -0.23484154, 'top_logprobs': []},
     {'token': ' a', 'bytes': [32, 97], 'logprob': -0.0018291725, 'top_logprobs': []},
     {'token': ' computer', 'bytes': [32, 99, 111, 109, 112, 117, 116, 101, 114], 'logprob': -0.052299336, 'top_logprobs': []}]

They are part of streamed Message chunks as well:

    ct = 0
    full = None
    for chunk in llm.stream(("human", "how are you today")):
        if ct < 5:
            full = chunk if full is None else full + chunk
            if "logprobs" in full.response_metadata:
                print(full.response_metadata["logprobs"]["content"])
        else:
            break
        ct += 1

    []
    [{'token': 'I', 'bytes': [73], 'logprob': -0.26593843, 'top_logprobs': []}]
    [{'token': 'I', 'bytes': [73], 'logprob': -0.26593843, 'top_logprobs': []}, {'token': "'m", 'bytes': [39, 109], 'logprob': -0.3238896, 'top_logprobs': []}]
    [{'token': 'I', 'bytes': [73], 'logprob': -0.26593843, 'top_logprobs': []}, {'token': "'m", 'bytes': [39, 109], 'logprob': -0.3238896, 'top_logprobs': []}, {'token': ' just', 'bytes': [32, 106, 117, 115, 116], 'logprob': -0.23778509, 'top_logprobs': []}]
    [{'token': 'I', 'bytes': [73], 'logprob': -0.26593843, 'top_logprobs': []}, {'token': "'m", 'bytes': [39, 109], 'logprob': -0.3238896, 'top_logprobs': []}, {'token': ' just', 'bytes': [32, 106, 117, 115, 116], 'logprob': -0.23778509, 'top_logprobs': []}, {'token': ' a', 'bytes': [32, 97], 'logprob': -0.0022134194, 'top_logprobs': []}]
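Each `logprob` is a natural-log probability, so the metadata is easy to post-process once you have it. The following is a small illustrative sketch (not part of the original guide) that assumes `msg` is the `AIMessage` returned above; it converts per-token logprobs to probabilities and sums them into a sequence log-likelihood:

    import math

    content_logprobs = msg.response_metadata["logprobs"]["content"]

    # Per-token probability: exponentiate the natural-log value.
    for entry in content_logprobs:
        print(f"{entry['token']!r}: p = {math.exp(entry['logprob']):.3f}")

    # Log-likelihood of the sampled tokens is the sum of their logprobs.
    sequence_logprob = sum(entry["logprob"] for entry in content_logprobs)
    print(f"sequence logprob: {sequence_logprob:.3f}")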
Next steps
----------

You've now learned how to get logprobs from OpenAI models in LangChain.

Next, check out the other how-to guides on chat models in this section, like [how to get a model to return structured output](/v0.2/docs/how_to/structured_output/) or [how to track token usage](/v0.2/docs/how_to/chat_token_usage_tracking/).

* * *

Source: https://python.langchain.com/v0.2/docs/how_to/local_llms/

Run LLMs locally
================

Use case
--------

The popularity of projects like [PrivateGPT](https://github.com/imartinez/privateGPT), [llama.cpp](https://github.com/ggerganov/llama.cpp), [Ollama](https://github.com/ollama/ollama), [GPT4All](https://github.com/nomic-ai/gpt4all), [llamafile](https://github.com/Mozilla-Ocho/llamafile), and others underscores the demand to run LLMs locally (on your own device).

This has at least two important benefits:

1. `Privacy`: Your data is not sent to a third party, and it is not subject to the terms of service of a commercial service
2. `Cost`: There is no inference fee, which is important for token-intensive applications (e.g., [long-running simulations](https://twitter.com/RLanceMartin/status/1691097659262820352?s=20), summarization)

Overview
--------

Running an LLM locally requires a few things:

1. `Open-source LLM`: An open-source LLM that can be freely modified and shared
2. `Inference`: The ability to run this LLM on your device with acceptable latency

### Open-source LLMs

Users can now gain access to a rapidly growing set of [open-source LLMs](https://cameronrwolfe.substack.com/p/the-history-of-open-source-llms-better).

These LLMs can be assessed across at least two dimensions (see figure):

1. `Base model`: What is the base model and how was it trained?
2. `Fine-tuning approach`: Was the base model fine-tuned and, if so, what [set of instructions](https://cameronrwolfe.substack.com/p/beyond-llama-the-power-of-open-llms#%C2%A7alpaca-an-instruction-following-llama-model) was used?

![Image description](/v0.2/assets/images/OSS_LLM_overview-9444c9793c76bd4785a5b0cd020c14ef.png)

The relative performance of these models can be assessed using several leaderboards, including:

1. [LmSys](https://chat.lmsys.org/?arena)
2. [GPT4All](https://gpt4all.io/index.html)
3. [HuggingFace](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard)

### Inference

A few frameworks have emerged to support inference of open-source LLMs on various devices:

1. [`llama.cpp`](https://github.com/ggerganov/llama.cpp): C++ implementation of Llama inference code with [weight optimization / quantization](https://finbarr.ca/how-is-llama-cpp-possible/)
2. [`gpt4all`](https://docs.gpt4all.io/index.html): Optimized C backend for inference
3. [`Ollama`](https://ollama.ai/): Bundles model weights and environment into an app that runs on device and serves the LLM
4. [`llamafile`](https://github.com/Mozilla-Ocho/llamafile): Bundles model weights and everything needed to run the model in a single file, allowing you to run the LLM locally from this file without any additional installation steps

In general, these frameworks will do a few things:

1. `Quantization`: Reduce the memory footprint of the raw model weights
2. `Efficient implementation for inference`: Support inference on consumer hardware (e.g., CPU or laptop GPU)

In particular, see [this excellent post](https://finbarr.ca/how-is-llama-cpp-possible/) on the importance of quantization.

![Image description](/v0.2/assets/images/llama-memory-weights-aaccef5df087e993b0f46277500039b6.png)

With less precision, we radically decrease the memory needed to store the LLM in memory.
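To make that concrete, here is a rough back-of-the-envelope sketch (the parameter count and bytes-per-weight values are illustrative, and real quantized files carry some extra overhead such as scales and metadata):

    # Approximate memory needed just to hold the weights of a 7B-parameter model.
    params = 7e9

    bytes_per_weight = {"fp32": 4, "fp16": 2, "int8": 1, "4-bit": 0.5}

    for precision, nbytes in bytes_per_weight.items():
        gb = params * nbytes / 1e9
        print(f"{precision:>5}: ~{gb:.1f} GB")

    # Roughly: fp32 ≈ 28 GB, fp16 ≈ 14 GB, int8 ≈ 7 GB, 4-bit ≈ 3.5 GB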
In addition, [this sheet](https://docs.google.com/spreadsheets/d/1OehfHHNSn66BP2h3Bxp2NJTVX97icU0GmCXF6pK23H8/edit#gid=0) shows the importance of GPU memory bandwidth: a Mac M2 Max is 5-6x faster than an M1 for inference because of its larger GPU memory bandwidth.

![Image description](/v0.2/assets/images/llama_t_put-c6f0ea201a6dd508999170325cd6804a.png)

Quickstart[​](#quickstart "Direct link to Quickstart")
------------------------------------------------------

[`Ollama`](https://ollama.ai/) is one way to easily run inference on macOS.

The instructions [here](https://github.com/jmorganca/ollama?tab=readme-ov-file#ollama) provide details, which we summarize:

* [Download and run](https://ollama.ai/download) the app
* From the command line, fetch a model from this [list of options](https://github.com/jmorganca/ollama): e.g., `ollama pull llama2`
* When the app is running, all models are automatically served on `localhost:11434`

    from langchain_community.llms import Ollama

    llm = Ollama(model="llama2")
    llm.invoke("The first man on the moon was ...")

**API Reference:** [Ollama](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.ollama.Ollama.html)

    ' The first man on the moon was Neil Armstrong, who landed on the moon on July 20, 1969 as part of the Apollo 11 mission. obviously.'

Stream tokens as they are being generated:

    from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler

    llm = Ollama(
        model="llama2",
        callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
    )
    llm.invoke("The first man on the moon was ...")

**API Reference:** [CallbackManager](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.CallbackManager.html) | [StreamingStdOutCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.streaming_stdout.StreamingStdOutCallbackHandler.html)

    The first man to walk on the moon was Neil Armstrong, an American astronaut who was part of the Apollo 11 mission in 1969. февруари 20, 1969, Armstrong stepped out of the lunar module Eagle and onto the moon's surface, famously declaring "That's one small step for man, one giant leap for mankind" as he took his first steps. He was followed by fellow astronaut Edwin "Buzz" Aldrin, who also walked on the moon during the mission.

    ' The first man to walk on the moon was Neil Armstrong, an American astronaut who was part of the Apollo 11 mission in 1969. февруари 20, 1969, Armstrong stepped out of the lunar module Eagle and onto the moon\'s surface, famously declaring "That\'s one small step for man, one giant leap for mankind" as he took his first steps. He was followed by fellow astronaut Edwin "Buzz" Aldrin, who also walked on the moon during the mission.'
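If you prefer the chat model interface over the plain LLM interface, `langchain_community` also provides a `ChatOllama` wrapper that talks to the same local Ollama server. A minimal sketch, assuming the app is running and `llama2` has been pulled as above (the prompt wording is just an illustration):

    from langchain_community.chat_models import ChatOllama
    from langchain_core.prompts import ChatPromptTemplate

    chat = ChatOllama(model="llama2")
    prompt = ChatPromptTemplate.from_template("Answer in one sentence: {question}")

    # Prompt and chat model compose into a runnable chain (LCEL).
    chain = prompt | chat
    print(chain.invoke({"question": "Who was the first man on the moon?"}).content)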
Environment[​](#environment "Direct link to Environment")
---------------------------------------------------------

Inference speed is a challenge when running models locally (see above).

To minimize latency, it is desirable to run models locally on a GPU, which ships with many consumer laptops (e.g., [Apple devices](https://www.apple.com/newsroom/2022/06/apple-unveils-m2-with-breakthrough-performance-and-capabilities/)).

And even with a GPU, the available GPU memory bandwidth (as noted above) is important.

### Running Apple silicon GPU[​](#running-apple-silicon-gpu "Direct link to Running Apple silicon GPU")

`Ollama` and [`llamafile`](https://github.com/Mozilla-Ocho/llamafile?tab=readme-ov-file#gpu-support) will automatically utilize the GPU on Apple devices.

Other frameworks require the user to set up the environment to utilize the Apple GPU.

For example, `llama.cpp` Python bindings can be configured to use the GPU via [Metal](https://developer.apple.com/metal/).

Metal is a graphics and compute API created by Apple that provides near-direct access to the GPU.

See the [`llama.cpp`](/v0.2/docs/integrations/llms/llamacpp/) setup [here](https://github.com/abetlen/llama-cpp-python/blob/main/docs/install/macos.md) to enable this.

In particular, ensure that conda is using the correct virtual environment that you created (`miniforge3`).

E.g., for me:

    conda activate /Users/rlm/miniforge3/envs/llama

With the above confirmed, then:

    CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install -U llama-cpp-python --no-cache-dir

LLMs[​](#llms "Direct link to LLMs")
------------------------------------

There are various ways to gain access to quantized model weights.

1. [`HuggingFace`](https://huggingface.co/TheBloke) - Many quantized models are available for download and can be run with frameworks such as [`llama.cpp`](https://github.com/ggerganov/llama.cpp). You can also download models in [`llamafile` format](https://huggingface.co/models?other=llamafile) from HuggingFace.
2. [`gpt4all`](https://gpt4all.io/index.html) - The model explorer offers a leaderboard of metrics and associated quantized models available for download
3. [`Ollama`](https://github.com/jmorganca/ollama) - Several models can be accessed directly via `pull`

### Ollama[​](#ollama "Direct link to Ollama")

With [Ollama](https://github.com/jmorganca/ollama), fetch a model via `ollama pull <model family>:<version>`:

* E.g., for Llama 2 7B: `ollama pull llama2` will download the most basic version of the model (e.g., smallest number of parameters, 4-bit quantization)
* We can also specify a particular version from the [model list](https://github.com/jmorganca/ollama?tab=readme-ov-file#model-library), e.g., `ollama pull llama2:13b`
* See the full set of parameters on the [API reference page](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.ollama.Ollama.html)

    from langchain_community.llms import Ollama

    llm = Ollama(model="llama2:13b")
    llm.invoke("The first man on the moon was ... think step by step")

**API Reference:** [Ollama](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.ollama.Ollama.html)

    ' Sure! Here\'s the answer, broken down step by step:\n\nThe first man on the moon was... Neil Armstrong.\n\nHere\'s how I arrived at that answer:\n\n1. The first manned mission to land on the moon was Apollo 11.\n2. The mission included three astronauts: Neil Armstrong, Edwin "Buzz" Aldrin, and Michael Collins.\n3. Neil Armstrong was the mission commander and the first person to set foot on the moon.\n4. On July 20, 1969, Armstrong stepped out of the lunar module Eagle and onto the moon\'s surface, famously declaring "That\'s one small step for man, one giant leap for mankind."\n\nSo, the first man on the moon was Neil Armstrong!'
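As an alternative to the stdout callback shown in the Quickstart, the `Ollama` wrapper also supports the standard runnable streaming interface. A small sketch, assuming the `llama2:13b` model pulled above (the `temperature` value is arbitrary):

    from langchain_community.llms import Ollama

    # Generation parameters such as temperature can be set at construction time.
    llm = Ollama(model="llama2:13b", temperature=0.1)

    # .stream() yields text chunks as the local server produces them.
    for chunk in llm.stream("The first man on the moon was ... think step by step"):
        print(chunk, end="", flush=True)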
### Llama.cpp[​](#llamacpp "Direct link to Llama.cpp")

Llama.cpp is compatible with a [broad set of models](https://github.com/ggerganov/llama.cpp).

For example, below we run inference on `llama2-13b` with 4-bit quantization downloaded from [HuggingFace](https://huggingface.co/TheBloke/Llama-2-13B-GGML/tree/main).

As noted above, see the [API reference](https://api.python.langchain.com/en/latest/llms/langchain.llms.llamacpp.LlamaCpp.html?highlight=llamacpp#langchain.llms.llamacpp.LlamaCpp) for the full set of parameters.

From the [llama.cpp API reference docs](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.llamacpp.LlamaCpp.html), a few parameters are worth commenting on:

`n_gpu_layers`: number of layers to be loaded into GPU memory

* Value: 1
* Meaning: Only one layer of the model will be loaded into GPU memory (1 is often sufficient).

`n_batch`: number of tokens the model should process in parallel

* Value: 512 (the value used below)
* Meaning: It's recommended to choose a value between 1 and `n_ctx` (which in this case is set to 2048)

`n_ctx`: token context window

* Value: 2048
* Meaning: The model will consider a window of 2048 tokens at a time

`f16_kv`: whether the model should use half-precision for the key/value cache

* Value: True
* Meaning: The model will use half-precision, which can be more memory efficient; Metal only supports True.

    %env CMAKE_ARGS="-DLLAMA_METAL=on"
    %env FORCE_CMAKE=1
    %pip install --upgrade --quiet llama-cpp-python --no-cache-dir

    from langchain_community.llms import LlamaCpp
    from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler

    llm = LlamaCpp(
        model_path="/Users/rlm/Desktop/Code/llama.cpp/models/openorca-platypus2-13b.gguf.q4_0.bin",
        n_gpu_layers=1,
        n_batch=512,
        n_ctx=2048,
        f16_kv=True,
        callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
        verbose=True,
    )

**API Reference:** [LlamaCpp](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.llamacpp.LlamaCpp.html) | [CallbackManager](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.CallbackManager.html) | [StreamingStdOutCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.streaming_stdout.StreamingStdOutCallbackHandler.html)

The console log will show the below to indicate that Metal was enabled properly from the steps above:

    ggml_metal_init: allocating
    ggml_metal_init: using MPS

    llm.invoke("The first man on the moon was ... Let's think step by step")

Llama.generate: prefix-match hit``````output and use logical reasoning to figure out who the first man on the moon was.Here are some clues:1. The first man on the moon was an American.2. He was part of the Apollo 11 mission.3. He stepped out of the lunar module and became the first person to set foot on the moon's surface.4. His last name is Armstrong.Now, let's use our reasoning skills to figure out who the first man on the moon was. Based on clue #1, we know that the first man on the moon was an American. 
Clue #2 tells us that he was part of the Apollo 11 mission. Clue #3 reveals that he was the first person to set foot on the moon's surface. And finally, clue #4 gives us his last name: Armstrong.Therefore, the first man on the moon was Neil Armstrong!``````outputllama_print_timings: load time = 9623.21 msllama_print_timings: sample time = 143.77 ms / 203 runs ( 0.71 ms per token, 1412.01 tokens per second)llama_print_timings: prompt eval time = 485.94 ms / 7 tokens ( 69.42 ms per token, 14.40 tokens per second)llama_print_timings: eval time = 6385.16 ms / 202 runs ( 31.61 ms per token, 31.64 tokens per second)llama_print_timings: total time = 7279.28 ms\n\n \" and use logical reasoning to figure out who the first man on the moon was.\\n\\nHere are some clues:\\n\\n1. The first man on the moon was an American.\\n2. He was part of the Apollo 11 mission.\\n3. He stepped out of the lunar module and became the first person to set foot on the moon's surface.\\n4. His last name is Armstrong.\\n\\nNow, let's use our reasoning skills to figure out who the first man on the moon was. Based on clue #1, we know that the first man on the moon was an American. Clue #2 tells us that he was part of the Apollo 11 mission. Clue #3 reveals that he was the first person to set foot on the moon's surface. And finally, clue #4 gives us his last name: Armstrong.\\nTherefore, the first man on the moon was Neil Armstrong!\"\n\n### GPT4All[​](#gpt4all \"Direct link to GPT4All\")\n\nWe can use model weights downloaded from [GPT4All](/v0.2/docs/integrations/llms/gpt4all/) model explorer.\n\nSimilar to what is shown above, we can run inference and use [the API reference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.gpt4all.GPT4All.html) to set parameters of interest.\n\n %pip install gpt4all\n\n from langchain_community.llms import GPT4Allllm = GPT4All( model=\"/Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin\")\n\n**API Reference:**[GPT4All](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.gpt4all.GPT4All.html)\n\n llm.invoke(\"The first man on the moon was ... Let's think step by step\")\n\n \".\\n1) The United States decides to send a manned mission to the moon.2) They choose their best astronauts and train them for this specific mission.3) They build a spacecraft that can take humans to the moon, called the Lunar Module (LM).4) They also create a larger spacecraft, called the Saturn V rocket, which will launch both the LM and the Command Service Module (CSM), which will carry the astronauts into orbit.5) The mission is planned down to the smallest detail: from the trajectory of the rockets to the exact movements of the astronauts during their moon landing.6) On July 16, 1969, the Saturn V rocket launches from Kennedy Space Center in Florida, carrying the Apollo 11 mission crew into space.7) After one and a half orbits around the Earth, the LM separates from the CSM and begins its descent to the moon's surface.8) On July 20, 1969, at 2:56 pm EDT (GMT-4), Neil Armstrong becomes the first man on the moon. He speaks these\"\n\n### llamafile[​](#llamafile \"Direct link to llamafile\")\n\nOne of the simplest ways to run an LLM locally is using a [llamafile](https://github.com/Mozilla-Ocho/llamafile). 
All you need to do is:

1) Download a llamafile from [HuggingFace](https://huggingface.co/models?other=llamafile)
2) Make the file executable
3) Run the file

llamafiles bundle model weights and a [specially-compiled](https://github.com/Mozilla-Ocho/llamafile?tab=readme-ov-file#technical-details) version of [`llama.cpp`](https://github.com/ggerganov/llama.cpp) into a single file that can run on most computers without any additional dependencies. They also come with an embedded inference server that provides an [API](https://github.com/Mozilla-Ocho/llamafile/blob/main/llama.cpp/server/README.md#api-endpoints) for interacting with your model.

Here's a simple bash script that shows all 3 setup steps:

    # Download a llamafile from HuggingFace
    wget https://huggingface.co/jartine/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/TinyLlama-1.1B-Chat-v1.0.Q5_K_M.llamafile

    # Make the file executable. On Windows, instead just rename the file to end in ".exe".
    chmod +x TinyLlama-1.1B-Chat-v1.0.Q5_K_M.llamafile

    # Start the model server. Listens at http://localhost:8080 by default.
    ./TinyLlama-1.1B-Chat-v1.0.Q5_K_M.llamafile --server --nobrowser

After you run the above setup steps, you can use LangChain to interact with your model:

    from langchain_community.llms.llamafile import Llamafile

    llm = Llamafile()
    llm.invoke("The first man on the moon was ... Let's think step by step.")

**API Reference:** [Llamafile](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.llamafile.Llamafile.html)

"\nFirstly, let's imagine the scene where Neil Armstrong stepped onto the moon. This happened in 1969. The first man on the moon was Neil Armstrong. We already know that.\n2nd, let's take a step back. Neil Armstrong didn't have any special powers. He had to land his spacecraft safely on the moon without injuring anyone or causing any damage. If he failed to do this, he would have been killed along with all those people who were on board the spacecraft.\n3rd, let's imagine that Neil Armstrong successfully landed his spacecraft on the moon and made it back to Earth safely. The next step was for him to be hailed as a hero by his people back home. It took years before Neil Armstrong became an American hero.\n4th, let's take another step back. Let's imagine that Neil Armstrong wasn't hailed as a hero, and instead, he was just forgotten. This happened in the 1970s. Neil Armstrong wasn't recognized for his remarkable achievement on the moon until after he died.\n5th, let's take another step back. Let's imagine that Neil Armstrong didn't die in the 1970s and instead, lived to be a hundred years old. This happened in 2036. In the year 2036, Neil Armstrong would have been a centenarian.\nNow, let's think about the present. Neil Armstrong is still alive. He turned 95 years old on July 20th, 2018. 
If he were to die now, his achievement of becoming the first human being to set foot on the moon would remain an unforgettable moment in history.\\nI hope this helps you understand the significance and importance of Neil Armstrong's achievement on the moon!\"\n\nPrompts[​](#prompts \"Direct link to Prompts\")\n---------------------------------------------\n\nSome LLMs will benefit from specific prompts.\n\nFor example, LLaMA will use [special tokens](https://twitter.com/RLanceMartin/status/1681879318493003776?s=20).\n\nWe can use `ConditionalPromptSelector` to set prompt based on the model type.\n\n # Set our LLMllm = LlamaCpp( model_path=\"/Users/rlm/Desktop/Code/llama.cpp/models/openorca-platypus2-13b.gguf.q4_0.bin\", n_gpu_layers=1, n_batch=512, n_ctx=2048, f16_kv=True, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]), verbose=True,)\n\nSet the associated prompt based upon the model version.\n\n from langchain.chains import LLMChainfrom langchain.chains.prompt_selector import ConditionalPromptSelectorfrom langchain_core.prompts import PromptTemplateDEFAULT_LLAMA_SEARCH_PROMPT = PromptTemplate( input_variables=[\"question\"], template=\"\"\"<> \\n You are an assistant tasked with improving Google search \\results. \\n <> \\n\\n [INST] Generate THREE Google search queries that \\are similar to this question. The output should be a numbered list of questions \\and each should have a question mark at the end: \\n\\n {question} [/INST]\"\"\",)DEFAULT_SEARCH_PROMPT = PromptTemplate( input_variables=[\"question\"], template=\"\"\"You are an assistant tasked with improving Google search \\results. Generate THREE Google search queries that are similar to \\this question. The output should be a numbered list of questions and each \\should have a question mark at the end: {question}\"\"\",)QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector( default_prompt=DEFAULT_SEARCH_PROMPT, conditionals=[(lambda llm: isinstance(llm, LlamaCpp), DEFAULT_LLAMA_SEARCH_PROMPT)],)prompt = QUESTION_PROMPT_SELECTOR.get_prompt(llm)prompt\n\n**API Reference:**[LLMChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.llm.LLMChain.html) | [ConditionalPromptSelector](https://api.python.langchain.com/en/latest/chains/langchain.chains.prompt_selector.ConditionalPromptSelector.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html)\n\n PromptTemplate(input_variables=['question'], output_parser=None, partial_variables={}, template='<> \\n You are an assistant tasked with improving Google search results. \\n <> \\n\\n [INST] Generate THREE Google search queries that are similar to this question. The output should be a numbered list of questions and each should have a question mark at the end: \\n\\n {question} [/INST]', template_format='f-string', validate_template=True)\n\n # Chainllm_chain = LLMChain(prompt=prompt, llm=llm)question = \"What NFL team won the Super Bowl in the year that Justin Bieber was born?\"llm_chain.run({\"question\": question})\n\n Sure! Here are three similar search queries with a question mark at the end:1. Which NBA team did LeBron James lead to a championship in the year he was drafted?2. Who won the Grammy Awards for Best New Artist and Best Female Pop Vocal Performance in the same year that Lady Gaga was born?3. 
What MLB team did Babe Ruth play for when he hit 60 home runs in a single season?``````output
llama_print_timings: load time = 14943.19 ms
llama_print_timings: sample time = 72.93 ms / 101 runs ( 0.72 ms per token, 1384.87 tokens per second)
llama_print_timings: prompt eval time = 14942.95 ms / 93 tokens ( 160.68 ms per token, 6.22 tokens per second)
llama_print_timings: eval time = 3430.85 ms / 100 runs ( 34.31 ms per token, 29.15 tokens per second)
llama_print_timings: total time = 18578.26 ms

    ' Sure! Here are three similar search queries with a question mark at the end:\n\n1. Which NBA team did LeBron James lead to a championship in the year he was drafted?\n2. Who won the Grammy Awards for Best New Artist and Best Female Pop Vocal Performance in the same year that Lady Gaga was born?\n3. What MLB team did Babe Ruth play for when he hit 60 home runs in a single season?'

We can also use the LangChain Prompt Hub to fetch and/or store prompts that are model-specific.

This will work with your [LangSmith API key](https://docs.smith.langchain.com/).

For example, [here](https://smith.langchain.com/hub/rlm/rag-prompt-llama) is a prompt for RAG with LLaMA-specific tokens.

Use cases[​](#use-cases "Direct link to Use cases")
---------------------------------------------------

Given an `llm` created from one of the models above, you can use it for [many use cases](/v0.2/docs/how_to/#use-cases).

For example, here is a guide to [RAG](/v0.2/docs/tutorials/local_rag/) with local LLMs.

In general, use cases for local LLMs can be driven by at least two factors:

* `Privacy`: private data (e.g., journals) that a user does not want to share
* `Cost`: text preprocessing (extraction/tagging), summarization, and agent simulations are token-use-intensive tasks

In addition, [here](https://blog.langchain.dev/using-langsmith-to-support-fine-tuning-of-open-source-llms/) is an overview of fine-tuning, which can utilize open-source LLMs.

* * *

arXiv
=====

LangChain implements the latest research in the field of Natural Language Processing.
This page contains `arXiv` papers referenced in the LangChain Documentation, API Reference, Templates, and Cookbooks.\n\nFrom the opposite direction, scientists use LangChain in research and reference LangChain in the research papers. Here you find [such papers](https://arxiv.org/search/?query=langchain&searchtype=all&source=header).\n\nSummary[​](#summary \"Direct link to Summary\")\n---------------------------------------------\n\narXiv id / Title\n\nAuthors\n\nPublished date 🔻\n\nLangChain Documentation\n\n`2402.03620v1` [Self-Discover: Large Language Models Self-Compose Reasoning Structures](http://arxiv.org/abs/2402.03620v1)\n\nPei Zhou, Jay Pujara, Xiang Ren, et al.\n\n2024-02-06\n\n`Cookbook:` [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb)\n\n`2401.18059v1` [RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval](http://arxiv.org/abs/2401.18059v1)\n\nParth Sarthi, Salman Abdullah, Aditi Tuli, et al.\n\n2024-01-31\n\n`Cookbook:` [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb)\n\n`2401.15884v2` [Corrective Retrieval Augmented Generation](http://arxiv.org/abs/2401.15884v2)\n\nShi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al.\n\n2024-01-29\n\n`Cookbook:` [langgraph\\_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)\n\n`2401.04088v1` [Mixtral of Experts](http://arxiv.org/abs/2401.04088v1)\n\nAlbert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al.\n\n2024-01-08\n\n`Cookbook:` [together\\_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb)\n\n`2312.06648v2` [Dense X Retrieval: What Retrieval Granularity Should We Use?](http://arxiv.org/abs/2312.06648v2)\n\nTong Chen, Hongwei Wang, Sihao Chen, et al.\n\n2023-12-11\n\n`Template:` [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)\n\n`2311.09210v1` [Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models](http://arxiv.org/abs/2311.09210v1)\n\nWenhao Yu, Hongming Zhang, Xiaoman Pan, et al.\n\n2023-11-15\n\n`Template:` [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)\n\n`2310.11511v1` [Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection](http://arxiv.org/abs/2310.11511v1)\n\nAkari Asai, Zeqiu Wu, Yizhong Wang, et al.\n\n2023-10-17\n\n`Cookbook:` [langgraph\\_self\\_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)\n\n`2310.06117v2` [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](http://arxiv.org/abs/2310.06117v2)\n\nHuaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al.\n\n2023-10-09\n\n`Template:` [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting), `Cookbook:` [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb)\n\n`2307.09288v2` [Llama 2: Open Foundation and Fine-Tuned Chat Models](http://arxiv.org/abs/2307.09288v2)\n\nHugo Touvron, Louis Martin, Kevin Stone, et al.\n\n2023-07-18\n\n`Cookbook:` [Semi\\_Structured\\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb)\n\n`2305.14283v3` [Query Rewriting for Retrieval-Augmented Large Language Models](http://arxiv.org/abs/2305.14283v3)\n\nXinbei Ma, Yeyun Gong, Pengcheng He, et al.\n\n2023-05-23\n\n`Template:` 
[rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read), `Cookbook:` [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb)\n\n`2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1)\n\nJieyi Long\n\n2023-05-15\n\n`API:` [langchain\\_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot), `Cookbook:` [tree\\_of\\_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)\n\n`2305.04091v3` [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](http://arxiv.org/abs/2305.04091v3)\n\nLei Wang, Wanyu Xu, Yihuai Lan, et al.\n\n2023-05-06\n\n`Cookbook:` [plan\\_and\\_execute\\_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)\n\n`2304.08485v2` [Visual Instruction Tuning](http://arxiv.org/abs/2304.08485v2)\n\nHaotian Liu, Chunyuan Li, Qingyang Wu, et al.\n\n2023-04-17\n\n`Cookbook:` [Semi\\_structured\\_and\\_multi\\_modal\\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb), [Semi\\_structured\\_multi\\_modal\\_RAG\\_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb)\n\n`2304.03442v2` [Generative Agents: Interactive Simulacra of Human Behavior](http://arxiv.org/abs/2304.03442v2)\n\nJoon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al.\n\n2023-04-07\n\n`Cookbook:` [multiagent\\_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb), [generative\\_agents\\_interactive\\_simulacra\\_of\\_human\\_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb)\n\n`2303.17760v2` [CAMEL: Communicative Agents for \"Mind\" Exploration of Large Language Model Society](http://arxiv.org/abs/2303.17760v2)\n\nGuohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al.\n\n2023-03-31\n\n`Cookbook:` [camel\\_role\\_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)\n\n`2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4)\n\nYongliang Shen, Kaitao Song, Xu Tan, et al.\n\n2023-03-30\n\n`API:` [langchain\\_experimental.autonomous\\_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents), `Cookbook:` [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)\n\n`2303.08774v6` [GPT-4 Technical Report](http://arxiv.org/abs/2303.08774v6)\n\nOpenAI, Josh Achiam, Steven Adler, et al.\n\n2023-03-15\n\n`Docs:` [docs/integrations/vectorstores/mongodb\\_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)\n\n`2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4)\n\nJohn Kirchenbauer, Jonas Geiping, Yuxin Wen, et al.\n\n2023-01-24\n\n`API:` [langchain\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), 
[langchain\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\\_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)\n\n`2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1)\n\nLuyu Gao, Xueguang Ma, Jimmy Lin, et al.\n\n2022-12-20\n\n`API:` [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder), `Template:` [hyde](https://python.langchain.com/docs/templates/hyde), `Cookbook:` [hypothetical\\_document\\_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)\n\n`2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3)\n\nZhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al.\n\n2022-12-12\n\n`API:` [langchain\\_experimental.fallacy\\_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)\n\n`2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2)\n\nXi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al.\n\n2022-11-25\n\n`API:` [langchain\\_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)\n\n`2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2)\n\nLuyu Gao, Aman Madaan, Shuyan Zhou, et al.\n\n2022-11-18\n\n`API:` [langchain\\_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain\\_experimental.pal\\_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), `Cookbook:` [program\\_aided\\_language\\_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)\n\n`2210.03629v3` [ReAct: Synergizing Reasoning and Acting in Language Models](http://arxiv.org/abs/2210.03629v3)\n\nShunyu Yao, Jeffrey Zhao, Dian Yu, et al.\n\n2022-10-06\n\n`Docs:` [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface), [docs/integrations/tools/ionic\\_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping), `API:` 
[langchain...create\\_react\\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)\n\n`2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2)\n\nSasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al.\n\n2022-09-22\n\n`Docs:` [docs/integrations/providers/activeloop\\_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)\n\n`2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1)\n\nKevin Heffernan, Onur Çelebi, Holger Schwenk\n\n2022-05-25\n\n`API:` [langchain\\_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)\n\n`2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1)\n\nNitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau\n\n2022-03-15\n\n`API:` [langchain\\_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain\\_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)\n\n`2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5)\n\nClara Meister, Tiago Pimentel, Gian Wiher, et al.\n\n2022-02-01\n\n`API:` [langchain\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)\n\n`2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1)\n\nAlec Radford, Jong Wook Kim, Chris Hallacy, et al.\n\n2021-02-26\n\n`API:` [langchain\\_experimental.open\\_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)\n\n`1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2)\n\nNitish Shirish Keskar, Bryan McCann, Lav R. 
Varshney, et al.\n\n2019-09-11\n\n`API:` [langchain\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)\n\n`1908.10084v1` [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](http://arxiv.org/abs/1908.10084v1)\n\nNils Reimers, Iryna Gurevych\n\n2019-08-27\n\n`Docs:` [docs/integrations/text\\_embedding/sentence\\_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)\n\nSelf-Discover: Large Language Models Self-Compose Reasoning Structures[​](#self-discover-large-language-models-self-compose-reasoning-structures \"Direct link to Self-Discover: Large Language Models Self-Compose Reasoning Structures\")\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2402.03620v1\n \n* **Title:** Self-Discover: Large Language Models Self-Compose Reasoning Structures\n \n* **Authors:** Pei Zhou, Jay Pujara, Xiang Ren, et al.\n \n* **Published Date:** 2024-02-06\n \n* **URL:** [http://arxiv.org/abs/2402.03620v1](http://arxiv.org/abs/2402.03620v1)\n \n* **LangChain:**\n \n * **Cookbook:** [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb)\n\n**Abstract:** We introduce SELF-DISCOVER, a general framework for LLMs to self-discover the task-intrinsic reasoning structures to tackle complex reasoning problems that are challenging for typical prompting methods. Core to the framework is a self-discovery process where LLMs select multiple atomic reasoning modules such as critical thinking and step-by-step thinking, and compose them into an explicit reasoning structure for LLMs to follow during decoding. SELF-DISCOVER substantially improves GPT-4 and PaLM 2's performance on challenging reasoning benchmarks such as BigBench-Hard, grounded agent reasoning, and MATH, by as much as 32% compared to Chain of Thought (CoT). Furthermore, SELF-DISCOVER outperforms inference-intensive methods such as CoT-Self-Consistency by more than 20%, while requiring 10-40x fewer inference compute. 
Finally, we show that the self-discovered reasoning structures are universally applicable across model families: from PaLM 2-L to GPT-4, and from GPT-4 to Llama2, and share commonalities with human reasoning patterns.\n\nRAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval[​](#raptor-recursive-abstractive-processing-for-tree-organized-retrieval \"Direct link to RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval\")\n--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2401.18059v1\n \n* **Title:** RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval\n \n* **Authors:** Parth Sarthi, Salman Abdullah, Aditi Tuli, et al.\n \n* **Published Date:** 2024-01-31\n \n* **URL:** [http://arxiv.org/abs/2401.18059v1](http://arxiv.org/abs/2401.18059v1)\n \n* **LangChain:**\n \n * **Cookbook:** [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb)\n\n**Abstract:** Retrieval-augmented language models can better adapt to changes in world state and incorporate long-tail knowledge. However, most existing methods retrieve only short contiguous chunks from a retrieval corpus, limiting holistic understanding of the overall document context. We introduce the novel approach of recursively embedding, clustering, and summarizing chunks of text, constructing a tree with differing levels of summarization from the bottom up. At inference time, our RAPTOR model retrieves from this tree, integrating information across lengthy documents at different levels of abstraction. Controlled experiments show that retrieval with recursive summaries offers significant improvements over traditional retrieval-augmented LMs on several tasks. On question-answering tasks that involve complex, multi-step reasoning, we show state-of-the-art results; for example, by coupling RAPTOR retrieval with the use of GPT-4, we can improve the best performance on the QuALITY benchmark by 20% in absolute accuracy.\n\nCorrective Retrieval Augmented Generation[​](#corrective-retrieval-augmented-generation \"Direct link to Corrective Retrieval Augmented Generation\")\n---------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2401.15884v2\n \n* **Title:** Corrective Retrieval Augmented Generation\n \n* **Authors:** Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al.\n \n* **Published Date:** 2024-01-29\n \n* **URL:** [http://arxiv.org/abs/2401.15884v2](http://arxiv.org/abs/2401.15884v2)\n \n* **LangChain:**\n \n * **Cookbook:** [langgraph\\_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)\n\n**Abstract:** Large language models (LLMs) inevitably exhibit hallucinations since the accuracy of generated texts cannot be secured solely by the parametric knowledge they encapsulate. Although retrieval-augmented generation (RAG) is a practicable complement to LLMs, it relies heavily on the relevance of retrieved documents, raising concerns about how the model behaves if retrieval goes wrong. To this end, we propose the Corrective Retrieval Augmented Generation (CRAG) to improve the robustness of generation. 
Specifically, a lightweight retrieval evaluator is designed to assess the overall quality of retrieved documents for a query, returning a confidence degree based on which different knowledge retrieval actions can be triggered. Since retrieval from static and limited corpora can only return sub-optimal documents, large-scale web searches are utilized as an extension for augmenting the retrieval results. Besides, a decompose-then-recompose algorithm is designed for retrieved documents to selectively focus on key information and filter out irrelevant information in them. CRAG is plug-and-play and can be seamlessly coupled with various RAG-based approaches. Experiments on four datasets covering short- and long-form generation tasks show that CRAG can significantly improve the performance of RAG-based approaches.\n\nMixtral of Experts[​](#mixtral-of-experts \"Direct link to Mixtral of Experts\")\n------------------------------------------------------------------------------\n\n* **arXiv id:** 2401.04088v1\n \n* **Title:** Mixtral of Experts\n \n* **Authors:** Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al.\n \n* **Published Date:** 2024-01-08\n \n* **URL:** [http://arxiv.org/abs/2401.04088v1](http://arxiv.org/abs/2401.04088v1)\n \n* **LangChain:**\n \n * **Cookbook:** [together\\_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb)\n\n**Abstract:** We introduce Mixtral 8x7B, a Sparse Mixture of Experts (SMoE) language model. Mixtral has the same architecture as Mistral 7B, with the difference that each layer is composed of 8 feedforward blocks (i.e. experts). For every token, at each layer, a router network selects two experts to process the current state and combine their outputs. Even though each token only sees two experts, the selected experts can be different at each timestep. As a result, each token has access to 47B parameters, but only uses 13B active parameters during inference. Mixtral was trained with a context size of 32k tokens and it outperforms or matches Llama 2 70B and GPT-3.5 across all evaluated benchmarks. In particular, Mixtral vastly outperforms Llama 2 70B on mathematics, code generation, and multilingual benchmarks. We also provide a model fine-tuned to follow instructions, Mixtral 8x7B - Instruct, that surpasses GPT-3.5 Turbo, Claude-2.1, Gemini Pro, and Llama 2 70B - chat model on human benchmarks. Both the base and instruct models are released under the Apache 2.0 license.\n\nDense X Retrieval: What Retrieval Granularity Should We Use?[​](#dense-x-retrieval-what-retrieval-granularity-should-we-use \"Direct link to Dense X Retrieval: What Retrieval Granularity Should We Use?\")\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2312.06648v2\n \n* **Title:** Dense X Retrieval: What Retrieval Granularity Should We Use?\n \n* **Authors:** Tong Chen, Hongwei Wang, Sihao Chen, et al.\n \n* **Published Date:** 2023-12-11\n \n* **URL:** [http://arxiv.org/abs/2312.06648v2](http://arxiv.org/abs/2312.06648v2)\n \n* **LangChain:**\n \n * **Template:** [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)\n\n**Abstract:** Dense retrieval has become a prominent method to obtain relevant context or world knowledge in open-domain NLP tasks. 
When we use a learned dense retriever on a retrieval corpus at inference time, an often-overlooked design choice is the retrieval unit in which the corpus is indexed, e.g. document, passage, or sentence. We discover that the retrieval unit choice significantly impacts the performance of both retrieval and downstream tasks. Distinct from the typical approach of using passages or sentences, we introduce a novel retrieval unit, proposition, for dense retrieval. Propositions are defined as atomic expressions within text, each encapsulating a distinct factoid and presented in a concise, self-contained natural language format. We conduct an empirical comparison of different retrieval granularity. Our results reveal that proposition-based retrieval significantly outperforms traditional passage or sentence-based methods in dense retrieval. Moreover, retrieval by proposition also enhances the performance of downstream QA tasks, since the retrieved texts are more condensed with question-relevant information, reducing the need for lengthy input tokens and minimizing the inclusion of extraneous, irrelevant information.\n\nChain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models[​](#chain-of-note-enhancing-robustness-in-retrieval-augmented-language-models \"Direct link to Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models\")\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2311.09210v1\n \n* **Title:** Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models\n \n* **Authors:** Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al.\n \n* **Published Date:** 2023-11-15\n \n* **URL:** [http://arxiv.org/abs/2311.09210v1](http://arxiv.org/abs/2311.09210v1)\n \n* **LangChain:**\n \n * **Template:** [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)\n\n**Abstract:** Retrieval-augmented language models (RALMs) represent a substantial advancement in the capabilities of large language models, notably in reducing factual hallucination by leveraging external knowledge sources. However, the reliability of the retrieved information is not always guaranteed. The retrieval of irrelevant data can lead to misguided responses, and potentially causing the model to overlook its inherent knowledge, even when it possesses adequate information to address the query. Moreover, standard RALMs often struggle to assess whether they possess adequate knowledge, both intrinsic and retrieved, to provide an accurate answer. In situations where knowledge is lacking, these systems should ideally respond with \"unknown\" when the answer is unattainable. In response to these challenges, we introduces Chain-of-Noting (CoN), a novel approach aimed at improving the robustness of RALMs in facing noisy, irrelevant documents and in handling unknown scenarios. The core idea of CoN is to generate sequential reading notes for retrieved documents, enabling a thorough evaluation of their relevance to the given question and integrating this information to formulate the final answer. We employed ChatGPT to create training data for CoN, which was subsequently trained on an LLaMa-2 7B model. Our experiments across four open-domain QA benchmarks show that RALMs equipped with CoN significantly outperform standard RALMs. 
Notably, CoN achieves an average improvement of +7.9 in EM score given entirely noisy retrieved documents and +10.5 in rejection rates for real-time questions that fall outside the pre-training knowledge scope.\n\nSelf-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection[​](#self-rag-learning-to-retrieve-generate-and-critique-through-self-reflection \"Direct link to Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection\")\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2310.11511v1\n \n* **Title:** Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection\n \n* **Authors:** Akari Asai, Zeqiu Wu, Yizhong Wang, et al.\n \n* **Published Date:** 2023-10-17\n \n* **URL:** [http://arxiv.org/abs/2310.11511v1](http://arxiv.org/abs/2310.11511v1)\n \n* **LangChain:**\n \n * **Cookbook:** [langgraph\\_self\\_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)\n\n**Abstract:** Despite their remarkable capabilities, large language models (LLMs) often produce responses containing factual inaccuracies due to their sole reliance on the parametric knowledge they encapsulate. Retrieval-Augmented Generation (RAG), an ad hoc approach that augments LMs with retrieval of relevant knowledge, decreases such issues. However, indiscriminately retrieving and incorporating a fixed number of retrieved passages, regardless of whether retrieval is necessary, or passages are relevant, diminishes LM versatility or can lead to unhelpful response generation. We introduce a new framework called Self-Reflective Retrieval-Augmented Generation (Self-RAG) that enhances an LM's quality and factuality through retrieval and self-reflection. Our framework trains a single arbitrary LM that adaptively retrieves passages on-demand, and generates and reflects on retrieved passages and its own generations using special tokens, called reflection tokens. Generating reflection tokens makes the LM controllable during the inference phase, enabling it to tailor its behavior to diverse task requirements. Experiments show that Self-RAG (7B and 13B parameters) significantly outperforms state-of-the-art LLMs and retrieval-augmented models on a diverse set of tasks. 
Specifically, Self-RAG outperforms ChatGPT and retrieval-augmented Llama2-chat on Open-domain QA, reasoning and fact verification tasks, and it shows significant gains in improving factuality and citation accuracy for long-form generations relative to these models.\n\nTake a Step Back: Evoking Reasoning via Abstraction in Large Language Models[​](#take-a-step-back-evoking-reasoning-via-abstraction-in-large-language-models \"Direct link to Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models\")\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2310.06117v2\n \n* **Title:** Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models\n \n* **Authors:** Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al.\n \n* **Published Date:** 2023-10-09\n \n* **URL:** [http://arxiv.org/abs/2310.06117v2](http://arxiv.org/abs/2310.06117v2)\n \n* **LangChain:**\n \n * **Template:** [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting)\n * **Cookbook:** [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb)\n\n**Abstract:** We present Step-Back Prompting, a simple prompting technique that enables LLMs to do abstractions to derive high-level concepts and first principles from instances containing specific details. Using the concepts and principles to guide reasoning, LLMs significantly improve their abilities in following a correct reasoning path towards the solution. We conduct experiments of Step-Back Prompting with PaLM-2L, GPT-4 and Llama2-70B models, and observe substantial performance gains on various challenging reasoning-intensive tasks including STEM, Knowledge QA, and Multi-Hop Reasoning. For instance, Step-Back Prompting improves PaLM-2L performance on MMLU (Physics and Chemistry) by 7% and 11% respectively, TimeQA by 27%, and MuSiQue by 7%.\n\nLlama 2: Open Foundation and Fine-Tuned Chat Models[​](#llama-2-open-foundation-and-fine-tuned-chat-models \"Direct link to Llama 2: Open Foundation and Fine-Tuned Chat Models\")\n--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2307.09288v2\n \n* **Title:** Llama 2: Open Foundation and Fine-Tuned Chat Models\n \n* **Authors:** Hugo Touvron, Louis Martin, Kevin Stone, et al.\n \n* **Published Date:** 2023-07-18\n \n* **URL:** [http://arxiv.org/abs/2307.09288v2](http://arxiv.org/abs/2307.09288v2)\n \n* **LangChain:**\n \n * **Cookbook:** [Semi\\_Structured\\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb)\n\n**Abstract:** In this work, we develop and release Llama 2, a collection of pretrained and fine-tuned large language models (LLMs) ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama 2-Chat, are optimized for dialogue use cases. Our models outperform open-source chat models on most benchmarks we tested, and based on our human evaluations for helpfulness and safety, may be a suitable substitute for closed-source models. 
We provide a detailed description of our approach to fine-tuning and safety improvements of Llama 2-Chat in order to enable the community to build on our work and contribute to the responsible development of LLMs.\n\nQuery Rewriting for Retrieval-Augmented Large Language Models[​](#query-rewriting-for-retrieval-augmented-large-language-models \"Direct link to Query Rewriting for Retrieval-Augmented Large Language Models\")\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2305.14283v3\n \n* **Title:** Query Rewriting for Retrieval-Augmented Large Language Models\n \n* **Authors:** Xinbei Ma, Yeyun Gong, Pengcheng He, et al.\n \n* **Published Date:** 2023-05-23\n \n* **URL:** [http://arxiv.org/abs/2305.14283v3](http://arxiv.org/abs/2305.14283v3)\n \n* **LangChain:**\n \n * **Template:** [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read)\n * **Cookbook:** [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb)\n\n**Abstract:** Large Language Models (LLMs) play powerful, black-box readers in the retrieve-then-read pipeline, making remarkable progress in knowledge-intensive tasks. This work introduces a new framework, Rewrite-Retrieve-Read instead of the previous retrieve-then-read for the retrieval-augmented LLMs from the perspective of the query rewriting. Unlike prior studies focusing on adapting either the retriever or the reader, our approach pays attention to the adaptation of the search query itself, for there is inevitably a gap between the input text and the needed knowledge in retrieval. We first prompt an LLM to generate the query, then use a web search engine to retrieve contexts. Furthermore, to better align the query to the frozen modules, we propose a trainable scheme for our pipeline. A small language model is adopted as a trainable rewriter to cater to the black-box LLM reader. The rewriter is trained using the feedback of the LLM reader by reinforcement learning. Evaluation is conducted on downstream tasks, open-domain QA and multiple-choice QA. Experiments results show consistent performance improvement, indicating that our framework is proven effective and scalable, and brings a new framework for retrieval-augmented LLM.\n\nLarge Language Model Guided Tree-of-Thought[​](#large-language-model-guided-tree-of-thought \"Direct link to Large Language Model Guided Tree-of-Thought\")\n---------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2305.08291v1\n \n* **Title:** Large Language Model Guided Tree-of-Thought\n \n* **Authors:** Jieyi Long\n \n* **Published Date:** 2023-05-15\n \n* **URL:** [http://arxiv.org/abs/2305.08291v1](http://arxiv.org/abs/2305.08291v1)\n \n* **LangChain:**\n \n * **API Reference:** [langchain\\_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot)\n * **Cookbook:** [tree\\_of\\_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)\n\n**Abstract:** In this paper, we introduce the Tree-of-Thought (ToT) framework, a novel approach aimed at improving the problem-solving capabilities of auto-regressive large language models (LLMs). 
The ToT technique is inspired by the human mind's approach for solving complex reasoning tasks through trial and error. In this process, the human mind explores the solution space through a tree-like thought process, allowing for backtracking when necessary. To implement ToT as a software system, we augment an LLM with additional modules including a prompter agent, a checker module, a memory module, and a ToT controller. In order to solve a given problem, these modules engage in a multi-round conversation with the LLM. The memory module records the conversation and state history of the problem solving process, which allows the system to backtrack to the previous steps of the thought-process and explore other directions from there. To verify the effectiveness of the proposed technique, we implemented a ToT-based solver for the Sudoku Puzzle. Experimental results show that the ToT framework can significantly increase the success rate of Sudoku puzzle solving. Our implementation of the ToT-based Sudoku solver is available on GitHub: [https://github.com/jieyilong/tree-of-thought-puzzle-solver](https://github.com/jieyilong/tree-of-thought-puzzle-solver).\n\nPlan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models[​](#plan-and-solve-prompting-improving-zero-shot-chain-of-thought-reasoning-by-large-language-models \"Direct link to Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models\")\n--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2305.04091v3\n \n* **Title:** Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models\n \n* **Authors:** Lei Wang, Wanyu Xu, Yihuai Lan, et al.\n \n* **Published Date:** 2023-05-06\n \n* **URL:** [http://arxiv.org/abs/2305.04091v3](http://arxiv.org/abs/2305.04091v3)\n \n* **LangChain:**\n \n * **Cookbook:** [plan\\_and\\_execute\\_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)\n\n**Abstract:** Large language models (LLMs) have recently been shown to deliver impressive performance in various NLP tasks. To tackle multi-step reasoning tasks, few-shot chain-of-thought (CoT) prompting includes a few manually crafted step-by-step reasoning demonstrations which enable LLMs to explicitly generate reasoning steps and improve their reasoning task accuracy. To eliminate the manual effort, Zero-shot-CoT concatenates the target problem statement with \"Let's think step by step\" as an input prompt to LLMs. Despite the success of Zero-shot-CoT, it still suffers from three pitfalls: calculation errors, missing-step errors, and semantic misunderstanding errors. To address the missing-step errors, we propose Plan-and-Solve (PS) Prompting. It consists of two components: first, devising a plan to divide the entire task into smaller subtasks, and then carrying out the subtasks according to the plan. To address the calculation errors and improve the quality of generated reasoning steps, we extend PS prompting with more detailed instructions and derive PS+ prompting. We evaluate our proposed prompting strategy on ten datasets across three reasoning problems. 
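Because the method is purely prompt-based, it can be tried with ordinary LangChain primitives; a minimal sketch with the trigger wording adapted from the paper (the model choice and sample question are illustrative):

```python
# Minimal Plan-and-Solve style zero-shot prompt expressed with LCEL.
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

ps_prompt = ChatPromptTemplate.from_template(
    "{question}\n\n"
    "Let's first understand the problem and devise a plan to solve the problem. "
    "Then, let's carry out the plan and solve the problem step by step."
)
chain = ps_prompt | ChatOpenAI(model="gpt-4o-mini", temperature=0) | StrOutputParser()
print(chain.invoke({"question": "A bakery sold 24 cupcakes in the morning and twice as many in the afternoon. How many cupcakes were sold in total?"}))
```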
The experimental results over GPT-3 show that our proposed zero-shot prompting consistently outperforms Zero-shot-CoT across all datasets by a large margin, is comparable to or exceeds Zero-shot-Program-of-Thought Prompting, and has comparable performance with 8-shot CoT prompting on the math reasoning problem. The code can be found at [https://github.com/AGI-Edgerunners/Plan-and-Solve-Prompting](https://github.com/AGI-Edgerunners/Plan-and-Solve-Prompting).\n\nVisual Instruction Tuning[​](#visual-instruction-tuning \"Direct link to Visual Instruction Tuning\")\n---------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2304.08485v2\n \n* **Title:** Visual Instruction Tuning\n \n* **Authors:** Haotian Liu, Chunyuan Li, Qingyang Wu, et al.\n \n* **Published Date:** 2023-04-17\n \n* **URL:** [http://arxiv.org/abs/2304.08485v2](http://arxiv.org/abs/2304.08485v2)\n \n* **LangChain:**\n \n * **Cookbook:** [Semi\\_structured\\_and\\_multi\\_modal\\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb), [Semi\\_structured\\_multi\\_modal\\_RAG\\_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb)\n\n**Abstract:** Instruction tuning large language models (LLMs) using machine-generated instruction-following data has improved zero-shot capabilities on new tasks, but the idea is less explored in the multimodal field. In this paper, we present the first attempt to use language-only GPT-4 to generate multimodal language-image instruction-following data. By instruction tuning on such generated data, we introduce LLaVA: Large Language and Vision Assistant, an end-to-end trained large multimodal model that connects a vision encoder and LLM for general-purpose visual and language understanding.Our early experiments show that LLaVA demonstrates impressive multimodel chat abilities, sometimes exhibiting the behaviors of multimodal GPT-4 on unseen images/instructions, and yields a 85.1% relative score compared with GPT-4 on a synthetic multimodal instruction-following dataset. When fine-tuned on Science QA, the synergy of LLaVA and GPT-4 achieves a new state-of-the-art accuracy of 92.53%. We make GPT-4 generated visual instruction tuning data, our model and code base publicly available.\n\nGenerative Agents: Interactive Simulacra of Human Behavior[​](#generative-agents-interactive-simulacra-of-human-behavior \"Direct link to Generative Agents: Interactive Simulacra of Human Behavior\")\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2304.03442v2\n \n* **Title:** Generative Agents: Interactive Simulacra of Human Behavior\n \n* **Authors:** Joon Sung Park, Joseph C. O'Brien, Carrie J. 
Cai, et al.\n \n* **Published Date:** 2023-04-07\n \n* **URL:** [http://arxiv.org/abs/2304.03442v2](http://arxiv.org/abs/2304.03442v2)\n \n* **LangChain:**\n \n * **Cookbook:** [multiagent\\_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb), [generative\\_agents\\_interactive\\_simulacra\\_of\\_human\\_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb)\n\n**Abstract:** Believable proxies of human behavior can empower interactive applications ranging from immersive environments to rehearsal spaces for interpersonal communication to prototyping tools. In this paper, we introduce generative agents--computational software agents that simulate believable human behavior. Generative agents wake up, cook breakfast, and head to work; artists paint, while authors write; they form opinions, notice each other, and initiate conversations; they remember and reflect on days past as they plan the next day. To enable generative agents, we describe an architecture that extends a large language model to store a complete record of the agent's experiences using natural language, synthesize those memories over time into higher-level reflections, and retrieve them dynamically to plan behavior. We instantiate generative agents to populate an interactive sandbox environment inspired by The Sims, where end users can interact with a small town of twenty five agents using natural language. In an evaluation, these generative agents produce believable individual and emergent social behaviors: for example, starting with only a single user-specified notion that one agent wants to throw a Valentine's Day party, the agents autonomously spread invitations to the party over the next two days, make new acquaintances, ask each other out on dates to the party, and coordinate to show up for the party together at the right time. We demonstrate through ablation that the components of our agent architecture--observation, planning, and reflection--each contribute critically to the believability of agent behavior. By fusing large language models with computational, interactive agents, this work introduces architectural and interaction patterns for enabling believable simulations of human behavior.\n\nCAMEL: Communicative Agents for \"Mind\" Exploration of Large Language Model Society[​](#camel-communicative-agents-for-mind-exploration-of-large-language-model-society \"Direct link to CAMEL: Communicative Agents for \\\"Mind\\\" Exploration of Large Language Model Society\")\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2303.17760v2\n \n* **Title:** CAMEL: Communicative Agents for \"Mind\" Exploration of Large Language Model Society\n \n* **Authors:** Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al.\n \n* **Published Date:** 2023-03-31\n \n* **URL:** [http://arxiv.org/abs/2303.17760v2](http://arxiv.org/abs/2303.17760v2)\n \n* **LangChain:**\n \n * **Cookbook:** [camel\\_role\\_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)\n\n**Abstract:** The rapid advancement of chat-based language models has led to remarkable progress in complex task-solving. 
However, their success heavily relies on human input to guide the conversation, which can be challenging and time-consuming. This paper explores the potential of building scalable techniques to facilitate autonomous cooperation among communicative agents, and provides insight into their \"cognitive\" processes. To address the challenges of achieving autonomous cooperation, we propose a novel communicative agent framework named role-playing. Our approach involves using inception prompting to guide chat agents toward task completion while maintaining consistency with human intentions. We showcase how role-playing can be used to generate conversational data for studying the behaviors and capabilities of a society of agents, providing a valuable resource for investigating conversational language models. In particular, we conduct comprehensive studies on instruction-following cooperation in multi-agent settings. Our contributions include introducing a novel communicative agent framework, offering a scalable approach for studying the cooperative behaviors and capabilities of multi-agent systems, and open-sourcing our library to support research on communicative agents and beyond: [https://github.com/camel-ai/camel](https://github.com/camel-ai/camel).\n\nHuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face[​](#hugginggpt-solving-ai-tasks-with-chatgpt-and-its-friends-in-hugging-face \"Direct link to HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face\")\n--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2303.17580v4\n \n* **Title:** HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face\n \n* **Authors:** Yongliang Shen, Kaitao Song, Xu Tan, et al.\n \n* **Published Date:** 2023-03-30\n \n* **URL:** [http://arxiv.org/abs/2303.17580v4](http://arxiv.org/abs/2303.17580v4)\n \n* **LangChain:**\n \n * **API Reference:** [langchain\\_experimental.autonomous\\_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents)\n * **Cookbook:** [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)\n\n**Abstract:** Solving complicated AI tasks with different domains and modalities is a key step toward artificial general intelligence. While there are numerous AI models available for various domains and modalities, they cannot handle complicated AI tasks autonomously. Considering large language models (LLMs) have exhibited exceptional abilities in language understanding, generation, interaction, and reasoning, we advocate that LLMs could act as a controller to manage existing AI models to solve complicated AI tasks, with language serving as a generic interface to empower this. Based on this philosophy, we present HuggingGPT, an LLM-powered agent that leverages LLMs (e.g., ChatGPT) to connect various AI models in machine learning communities (e.g., Hugging Face) to solve AI tasks. Specifically, we use ChatGPT to conduct task planning when receiving a user request, select models according to their function descriptions available in Hugging Face, execute each subtask with the selected AI model, and summarize the response according to the execution results. 
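LangChain ships an experimental wrapper for this controller pattern; a rough sketch is below (the empty tool list is a placeholder for Hugging Face task tools, and the wrapper's exact interface should be treated as an assumption):

```python
# Rough sketch of the experimental HuggingGPT wrapper; tool setup is left as a placeholder.
from langchain_experimental.autonomous_agents import HuggingGPT
from langchain_openai import OpenAI

llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0)
tools = []  # in the cookbook this holds Hugging Face task tools (image, audio, etc.)
agent = HuggingGPT(llm, tools)
agent.run("Please generate an image of 'a boy running on a beach' and describe it.")
```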
By leveraging the strong language capability of ChatGPT and abundant AI models in Hugging Face, HuggingGPT can tackle a wide range of sophisticated AI tasks spanning different modalities and domains and achieve impressive results in language, vision, speech, and other challenging tasks, which paves a new way towards the realization of artificial general intelligence.\n\nGPT-4 Technical Report[​](#gpt-4-technical-report \"Direct link to GPT-4 Technical Report\")\n------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2303.08774v6\n \n* **Title:** GPT-4 Technical Report\n \n* **Authors:** OpenAI, Josh Achiam, Steven Adler, et al.\n \n* **Published Date:** 2023-03-15\n \n* **URL:** [http://arxiv.org/abs/2303.08774v6](http://arxiv.org/abs/2303.08774v6)\n \n* **LangChain:**\n \n * **Documentation:** [docs/integrations/vectorstores/mongodb\\_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)\n\n**Abstract:** We report the development of GPT-4, a large-scale, multimodal model which can accept image and text inputs and produce text outputs. While less capable than humans in many real-world scenarios, GPT-4 exhibits human-level performance on various professional and academic benchmarks, including passing a simulated bar exam with a score around the top 10% of test takers. GPT-4 is a Transformer-based model pre-trained to predict the next token in a document. The post-training alignment process results in improved performance on measures of factuality and adherence to desired behavior. A core component of this project was developing infrastructure and optimization methods that behave predictably across a wide range of scales. This allowed us to accurately predict some aspects of GPT-4's performance based on models trained with no more than 1/1,000th the compute of GPT-4.\n\nA Watermark for Large Language Models[​](#a-watermark-for-large-language-models \"Direct link to A Watermark for Large Language Models\")\n---------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2301.10226v4\n \n* **Title:** A Watermark for Large Language Models\n \n* **Authors:** John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al.\n \n* **Published Date:** 2023-01-24\n \n* **URL:** [http://arxiv.org/abs/2301.10226v4](http://arxiv.org/abs/2301.10226v4)\n \n* **LangChain:**\n \n * **API Reference:** [langchain\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\\_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)\n\n**Abstract:** Potential harms of 
large language models can be mitigated by watermarking model output, i.e., embedding signals into generated text that are invisible to humans but algorithmically detectable from a short span of tokens. We propose a watermarking framework for proprietary language models. The watermark can be embedded with negligible impact on text quality, and can be detected using an efficient open-source algorithm without access to the language model API or parameters. The watermark works by selecting a randomized set of \"green\" tokens before a word is generated, and then softly promoting use of green tokens during sampling. We propose a statistical test for detecting the watermark with interpretable p-values, and derive an information-theoretic framework for analyzing the sensitivity of the watermark. We test the watermark using a multi-billion parameter model from the Open Pretrained Transformer (OPT) family, and discuss robustness and security.\n\nPrecise Zero-Shot Dense Retrieval without Relevance Labels[​](#precise-zero-shot-dense-retrieval-without-relevance-labels \"Direct link to Precise Zero-Shot Dense Retrieval without Relevance Labels\")\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2212.10496v1\n \n* **Title:** Precise Zero-Shot Dense Retrieval without Relevance Labels\n \n* **Authors:** Luyu Gao, Xueguang Ma, Jimmy Lin, et al.\n \n* **Published Date:** 2022-12-20\n \n* **URL:** [http://arxiv.org/abs/2212.10496v1](http://arxiv.org/abs/2212.10496v1)\n \n* **LangChain:**\n \n * **API Reference:** [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)\n * **Template:** [hyde](https://python.langchain.com/docs/templates/hyde)\n * **Cookbook:** [hypothetical\\_document\\_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)\n\n**Abstract:** While dense retrieval has been shown effective and efficient across tasks and languages, it remains difficult to create effective fully zero-shot dense retrieval systems when no relevance label is available. In this paper, we recognize the difficulty of zero-shot learning and encoding relevance. Instead, we propose to pivot through Hypothetical Document Embeddings~(HyDE). Given a query, HyDE first zero-shot instructs an instruction-following language model (e.g. InstructGPT) to generate a hypothetical document. The document captures relevance patterns but is unreal and may contain false details. Then, an unsupervised contrastively learned encoder~(e.g. Contriever) encodes the document into an embedding vector. This vector identifies a neighborhood in the corpus embedding space, where similar real documents are retrieved based on vector similarity. This second step ground the generated document to the actual corpus, with the encoder's dense bottleneck filtering out the incorrect details. Our experiments show that HyDE significantly outperforms the state-of-the-art unsupervised dense retriever Contriever and shows strong performance comparable to fine-tuned retrievers, across various tasks (e.g. web search, QA, fact verification) and languages~(e.g. 
sw, ko, ja).\n\nRobust and Explainable Identification of Logical Fallacies in Natural Language Arguments[​](#robust-and-explainable-identification-of-logical-fallacies-in-natural-language-arguments \"Direct link to Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments\")\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2212.07425v3\n \n* **Title:** Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments\n \n* **Authors:** Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al.\n \n* **Published Date:** 2022-12-12\n \n* **URL:** [http://arxiv.org/abs/2212.07425v3](http://arxiv.org/abs/2212.07425v3)\n \n* **LangChain:**\n \n * **API Reference:** [langchain\\_experimental.fallacy\\_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)\n\n**Abstract:** The spread of misinformation, propaganda, and flawed argumentation has been amplified in the Internet era. Given the volume of data and the subtlety of identifying violations of argumentation norms, supporting information analytics tasks, like content moderation, with trustworthy methods that can identify logical fallacies is essential. In this paper, we formalize prior theoretical work on logical fallacies into a comprehensive three-stage evaluation framework of detection, coarse-grained, and fine-grained classification. We adapt existing evaluation datasets for each stage of the evaluation. We employ three families of robust and explainable methods based on prototype reasoning, instance-based reasoning, and knowledge injection. The methods combine language models with background knowledge and explainable mechanisms. Moreover, we address data sparsity with strategies for data augmentation and curriculum learning. Our three-stage framework natively consolidates prior datasets and methods from existing tasks, like propaganda detection, serving as an overarching evaluation testbed. We extensively evaluate these methods on our datasets, focusing on their robustness and explainability. Our results provide insight into the strengths and weaknesses of the methods on different components and fallacy classes, indicating that fallacy identification is a challenging task that may require specialized forms of reasoning to capture various classes. 
We share our open-source code and data on GitHub to support further work on logical fallacy identification.\n\nComplementary Explanations for Effective In-Context Learning[​](#complementary-explanations-for-effective-in-context-learning \"Direct link to Complementary Explanations for Effective In-Context Learning\")\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2211.13892v2\n \n* **Title:** Complementary Explanations for Effective In-Context Learning\n \n* **Authors:** Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al.\n \n* **Published Date:** 2022-11-25\n \n* **URL:** [http://arxiv.org/abs/2211.13892v2](http://arxiv.org/abs/2211.13892v2)\n \n* **LangChain:**\n \n * **API Reference:** [langchain\\_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)\n\n**Abstract:** Large language models (LLMs) have exhibited remarkable capabilities in learning from explanations in prompts, but there has been limited understanding of exactly how these explanations function or why they are effective. This work aims to better understand the mechanisms by which explanations are used for in-context learning. We first study the impact of two different factors on the performance of prompts with explanations: the computation trace (the way the solution is decomposed) and the natural language used to express the prompt. By perturbing explanations on three controlled tasks, we show that both factors contribute to the effectiveness of explanations. We further study how to form maximally effective sets of explanations for solving a given test query. We find that LLMs can benefit from the complementarity of the explanation set: diverse reasoning skills shown by different exemplars can lead to better performance. 
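The `MaxMarginalRelevanceExampleSelector` referenced above applies this idea when picking few-shot exemplars; a small sketch (the toy antonym examples and the embedding and vector-store choices are illustrative):

```python
# Selecting few-shot exemplars that are relevant yet mutually diverse (MMR).
from langchain_community.vectorstores import FAISS
from langchain_core.example_selectors import MaxMarginalRelevanceExampleSelector
from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate
from langchain_openai import OpenAIEmbeddings

examples = [
    {"input": "happy", "output": "sad"},
    {"input": "tall", "output": "short"},
    {"input": "sunny", "output": "gloomy"},
    {"input": "windy", "output": "calm"},
]
selector = MaxMarginalRelevanceExampleSelector.from_examples(
    examples, OpenAIEmbeddings(), FAISS, k=2
)
prompt = FewShotPromptTemplate(
    example_selector=selector,
    example_prompt=PromptTemplate.from_template("Input: {input}\nOutput: {output}"),
    prefix="Give the antonym of every input.",
    suffix="Input: {adjective}\nOutput:",
    input_variables=["adjective"],
)
print(prompt.format(adjective="worried"))
```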
Therefore, we propose a maximal marginal relevance-based exemplar selection approach for constructing exemplar sets that are both relevant as well as complementary, which successfully improves the in-context learning performance across three real-world tasks on multiple LLMs.\n\nPAL: Program-aided Language Models[​](#pal-program-aided-language-models \"Direct link to PAL: Program-aided Language Models\")\n-----------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2211.10435v2\n \n* **Title:** PAL: Program-aided Language Models\n \n* **Authors:** Luyu Gao, Aman Madaan, Shuyan Zhou, et al.\n \n* **Published Date:** 2022-11-18\n \n* **URL:** [http://arxiv.org/abs/2211.10435v2](http://arxiv.org/abs/2211.10435v2)\n \n* **LangChain:**\n \n * **API Reference:** [langchain\\_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain\\_experimental.pal\\_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain)\n * **Cookbook:** [program\\_aided\\_language\\_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)\n\n**Abstract:** Large language models (LLMs) have recently demonstrated an impressive ability to perform arithmetic and symbolic reasoning tasks, when provided with a few examples at test time (\"few-shot prompting\"). Much of this success can be attributed to prompting methods such as \"chain-of-thought'', which employ LLMs for both understanding the problem description by decomposing it into steps, as well as solving each step of the problem. While LLMs seem to be adept at this sort of step-by-step decomposition, LLMs often make logical and arithmetic mistakes in the solution part, even when the problem is decomposed correctly. In this paper, we present Program-Aided Language models (PAL): a novel approach that uses the LLM to read natural language problems and generate programs as the intermediate reasoning steps, but offloads the solution step to a runtime such as a Python interpreter. With PAL, decomposing the natural language problem into runnable steps remains the only learning task for the LLM, while solving is delegated to the interpreter. We demonstrate this synergy between a neural LLM and a symbolic interpreter across 13 mathematical, symbolic, and algorithmic reasoning tasks from BIG-Bench Hard and other benchmarks. In all these natural language reasoning tasks, generating code using an LLM and reasoning using a Python interpreter leads to more accurate results than much larger models. For example, PAL using Codex achieves state-of-the-art few-shot accuracy on the GSM8K benchmark of math word problems, surpassing PaLM-540B which uses chain-of-thought by absolute 15% top-1. 
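The experimental `PALChain` referenced above implements this offloading; a short sketch (the word problem is illustrative, and note that recent releases require an explicit opt-in because generated code is executed):

```python
# Sketch: let the LLM write a Python program and run it to get the final answer.
from langchain_experimental.pal_chain import PALChain
from langchain_openai import OpenAI

llm = OpenAI(temperature=0, max_tokens=512)
pal_chain = PALChain.from_math_prompt(llm, verbose=True, allow_dangerous_code=True)
question = (
    "Jan has three times the number of pets as Marcia. Marcia has two more pets "
    "than Cindy. If Cindy has four pets, how many pets do the three have in total?"
)
print(pal_chain.invoke({"question": question}))
```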
Our code and data are publicly available at [http://reasonwithpal.com/](http://reasonwithpal.com/) .\n\nReAct: Synergizing Reasoning and Acting in Language Models[​](#react-synergizing-reasoning-and-acting-in-language-models \"Direct link to ReAct: Synergizing Reasoning and Acting in Language Models\")\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2210.03629v3\n \n* **Title:** ReAct: Synergizing Reasoning and Acting in Language Models\n \n* **Authors:** Shunyu Yao, Jeffrey Zhao, Dian Yu, et al.\n \n* **Published Date:** 2022-10-06\n \n* **URL:** [http://arxiv.org/abs/2210.03629v3](http://arxiv.org/abs/2210.03629v3)\n \n* **LangChain:**\n \n * **Documentation:** [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface), [docs/integrations/tools/ionic\\_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping)\n * **API Reference:** [langchain...create\\_react\\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)\n\n**Abstract:** While large language models (LLMs) have demonstrated impressive capabilities across tasks in language understanding and interactive decision making, their abilities for reasoning (e.g. chain-of-thought prompting) and acting (e.g. action plan generation) have primarily been studied as separate topics. In this paper, we explore the use of LLMs to generate both reasoning traces and task-specific actions in an interleaved manner, allowing for greater synergy between the two: reasoning traces help the model induce, track, and update action plans as well as handle exceptions, while actions allow it to interface with external sources, such as knowledge bases or environments, to gather additional information. We apply our approach, named ReAct, to a diverse set of language and decision making tasks and demonstrate its effectiveness over state-of-the-art baselines, as well as improved human interpretability and trustworthiness over methods without reasoning or acting components. Concretely, on question answering (HotpotQA) and fact verification (Fever), ReAct overcomes issues of hallucination and error propagation prevalent in chain-of-thought reasoning by interacting with a simple Wikipedia API, and generates human-like task-solving trajectories that are more interpretable than baselines without reasoning traces. On two interactive decision making benchmarks (ALFWorld and WebShop), ReAct outperforms imitation and reinforcement learning methods by an absolute success rate of 34% and 10% respectively, while being prompted with only one or two in-context examples. 
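The `create_react_agent` helper referenced above builds this loop from a prompt, an LLM, and tools; a compact sketch (the Tavily search tool assumes a `TAVILY_API_KEY`, and the question is illustrative):

```python
# Compact ReAct agent: interleaved reasoning traces and tool calls.
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_openai import ChatOpenAI

prompt = hub.pull("hwchase17/react")          # a standard ReAct prompt from the Hub
tools = [TavilySearchResults(max_results=2)]  # requires TAVILY_API_KEY in the environment
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)

agent = create_react_agent(llm, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)
executor.invoke({"input": "Who won the 2023 NBA Finals, and in how many games?"})
```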
Project site with code: [https://react-lm.github.io](https://react-lm.github.io)\n\nDeep Lake: a Lakehouse for Deep Learning[​](#deep-lake-a-lakehouse-for-deep-learning \"Direct link to Deep Lake: a Lakehouse for Deep Learning\")\n-----------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2209.10785v2\n \n* **Title:** Deep Lake: a Lakehouse for Deep Learning\n \n* **Authors:** Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al.\n \n* **Published Date:** 2022-09-22\n \n* **URL:** [http://arxiv.org/abs/2209.10785v2](http://arxiv.org/abs/2209.10785v2)\n \n* **LangChain:**\n \n * **Documentation:** [docs/integrations/providers/activeloop\\_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)\n\n**Abstract:** Traditional data lakes provide critical data infrastructure for analytical workloads by enabling time travel, running SQL queries, ingesting data with ACID transactions, and visualizing petabyte-scale datasets on cloud storage. They allow organizations to break down data silos, unlock data-driven decision-making, improve operational efficiency, and reduce costs. However, as deep learning usage increases, traditional data lakes are not well-designed for applications such as natural language processing (NLP), audio processing, computer vision, and applications involving non-tabular datasets. This paper presents Deep Lake, an open-source lakehouse for deep learning applications developed at Activeloop. Deep Lake maintains the benefits of a vanilla data lake with one key difference: it stores complex data, such as images, videos, annotations, as well as tabular data, in the form of tensors and rapidly streams the data over the network to (a) Tensor Query Language, (b) in-browser visualization engine, or (c) deep learning frameworks without sacrificing GPU utilization. Datasets stored in Deep Lake can be accessed from PyTorch, TensorFlow, JAX, and integrate with numerous MLOps tools.\n\nBitext Mining Using Distilled Sentence Representations for Low-Resource Languages[​](#bitext-mining-using-distilled-sentence-representations-for-low-resource-languages \"Direct link to Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages\")\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2205.12654v1\n \n* **Title:** Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages\n \n* **Authors:** Kevin Heffernan, Onur Çelebi, Holger Schwenk\n \n* **Published Date:** 2022-05-25\n \n* **URL:** [http://arxiv.org/abs/2205.12654v1](http://arxiv.org/abs/2205.12654v1)\n \n* **LangChain:**\n \n * **API Reference:** [langchain\\_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)\n\n**Abstract:** Scaling multilingual representation learning beyond the hundred most frequent languages is challenging, in particular to cover the long tail of low-resource languages. 
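The `LaserEmbeddings` wrapper referenced above exposes these encoders; a brief sketch (assumes the `laser_encoders` package is installed, and the language code is illustrative):

```python
# Brief sketch of multilingual LASER sentence embeddings via langchain_community.
from langchain_community.embeddings import LaserEmbeddings

embeddings = LaserEmbeddings(lang="eng_Latn")  # language code is illustrative
vectors = embeddings.embed_documents(
    ["Hello, how are you?", "LASER maps many languages into one embedding space."]
)
query_vector = embeddings.embed_query("a short greeting")
```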
A promising approach has been to train one-for-all multilingual models capable of cross-lingual transfer, but these models often suffer from insufficient capacity and interference between unrelated languages. Instead, we move away from this approach and focus on training multiple language (family) specific representations, but most prominently enable all languages to still be encoded in the same representational space. To achieve this, we focus on teacher-student training, allowing all encoders to be mutually compatible for bitext mining, and enabling fast learning of new languages. We introduce a new teacher-student training scheme which combines supervised and self-supervised training, allowing encoders to take advantage of monolingual training data, which is valuable in the low-resource setting. Our approach significantly outperforms the original LASER encoder. We study very low-resource languages and handle 50 African languages, many of which are not covered by any other model. For these languages, we train sentence encoders, mine bitexts, and validate the bitexts by training NMT systems.\n\nEvaluating the Text-to-SQL Capabilities of Large Language Models[​](#evaluating-the-text-to-sql-capabilities-of-large-language-models \"Direct link to Evaluating the Text-to-SQL Capabilities of Large Language Models\")\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2204.00498v1\n \n* **Title:** Evaluating the Text-to-SQL Capabilities of Large Language Models\n \n* **Authors:** Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau\n \n* **Published Date:** 2022-03-15\n \n* **URL:** [http://arxiv.org/abs/2204.00498v1](http://arxiv.org/abs/2204.00498v1)\n \n* **LangChain:**\n \n * **API Reference:** [langchain\\_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain\\_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)\n\n**Abstract:** We perform an empirical evaluation of Text-to-SQL capabilities of the Codex language model. We find that, without any finetuning, Codex is a strong baseline on the Spider benchmark; we also analyze the failure modes of Codex in this setting. 
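The `SQLDatabase` utility referenced above pairs with `create_sql_query_chain` for exactly this prompted text-to-SQL setup; a short sketch (assumes a local Chinook SQLite file; swap in your own connection string):

```python
# Prompted text-to-SQL: the chain injects the schema and asks the LLM for a query.
from langchain.chains import create_sql_query_chain
from langchain_community.utilities import SQLDatabase
from langchain_openai import ChatOpenAI

db = SQLDatabase.from_uri("sqlite:///Chinook.db")  # illustrative database
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)

chain = create_sql_query_chain(llm, db)
sql = chain.invoke({"question": "How many employees are there?"})
print(sql)
print(db.run(sql))
```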
Furthermore, we demonstrate on the GeoQuery and Scholar benchmarks that a small number of in-domain examples provided in the prompt enables Codex to perform better than state-of-the-art models finetuned on such few-shot examples.\n\nLocally Typical Sampling[​](#locally-typical-sampling \"Direct link to Locally Typical Sampling\")\n------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2202.00666v5\n \n* **Title:** Locally Typical Sampling\n \n* **Authors:** Clara Meister, Tiago Pimentel, Gian Wiher, et al.\n \n* **Published Date:** 2022-02-01\n \n* **URL:** [http://arxiv.org/abs/2202.00666v5](http://arxiv.org/abs/2202.00666v5)\n \n* **LangChain:**\n \n * **API Reference:** [langchain\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)\n\n**Abstract:** Today's probabilistic language generators fall short when it comes to producing coherent and fluent text despite the fact that the underlying models perform well under standard metrics, e.g., perplexity. This discrepancy has puzzled the language generation community for the last few years. In this work, we posit that the abstraction of natural language generation as a discrete stochastic process--which allows for an information-theoretic analysis--can provide new insights into the behavior of probabilistic language generators, e.g., why high-probability texts can be dull or repetitive. Humans use language as a means of communicating information, aiming to do so in a simultaneously efficient and error-minimizing manner; in fact, psycholinguistics research suggests humans choose each word in a string with this subconscious goal in mind. We formally define the set of strings that meet this criterion: those for which each word has an information content close to the expected information content, i.e., the conditional entropy of our model. We then propose a simple and efficient procedure for enforcing this criterion when generating from probabilistic models, which we call locally typical sampling. 
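In LangChain this decoding strategy surfaces as the `typical_p` parameter on the Hugging Face endpoint wrappers referenced above; a brief sketch (the repo id is illustrative and a `HUGGINGFACEHUB_API_TOKEN` is assumed to be set):

```python
# Locally typical sampling via the `typical_p` generation parameter.
from langchain_huggingface import HuggingFaceEndpoint

llm = HuggingFaceEndpoint(
    repo_id="HuggingFaceH4/zephyr-7b-beta",  # illustrative hosted model
    max_new_tokens=128,
    do_sample=True,
    typical_p=0.95,  # sample from the locally typical set
    temperature=1.0,
)
print(llm.invoke("Tell me a short story about a lighthouse keeper."))
```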
Automatic and human evaluations show that, in comparison to nucleus and top-k sampling, locally typical sampling offers competitive performance (in both abstractive summarization and story generation) in terms of quality while consistently reducing degenerate repetitions.\n\nLearning Transferable Visual Models From Natural Language Supervision[​](#learning-transferable-visual-models-from-natural-language-supervision \"Direct link to Learning Transferable Visual Models From Natural Language Supervision\")\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 2103.00020v1\n \n* **Title:** Learning Transferable Visual Models From Natural Language Supervision\n \n* **Authors:** Alec Radford, Jong Wook Kim, Chris Hallacy, et al.\n \n* **Published Date:** 2021-02-26\n \n* **URL:** [http://arxiv.org/abs/2103.00020v1](http://arxiv.org/abs/2103.00020v1)\n \n* **LangChain:**\n \n * **API Reference:** [langchain\\_experimental.open\\_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)\n\n**Abstract:** State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at [https://github.com/OpenAI/CLIP](https://github.com/OpenAI/CLIP).\n\nCTRL: A Conditional Transformer Language Model for Controllable Generation[​](#ctrl-a-conditional-transformer-language-model-for-controllable-generation \"Direct link to CTRL: A Conditional Transformer Language Model for Controllable Generation\")\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 1909.05858v2\n \n* **Title:** CTRL: A Conditional Transformer Language Model for Controllable Generation\n \n* **Authors:** Nitish Shirish Keskar, Bryan McCann, Lav R. 
Varshney, et al.\n \n* **Published Date:** 2019-09-11\n \n* **URL:** [http://arxiv.org/abs/1909.05858v2](http://arxiv.org/abs/1909.05858v2)\n \n* **LangChain:**\n \n * **API Reference:** [langchain\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)\n\n**Abstract:** Large-scale language models show promising text generation capabilities, but users cannot easily control particular aspects of the generated text. We release CTRL, a 1.63 billion-parameter conditional transformer language model, trained to condition on control codes that govern style, content, and task-specific behavior. Control codes were derived from structure that naturally co-occurs with raw text, preserving the advantages of unsupervised learning while providing more explicit control over text generation. These codes also allow CTRL to predict which parts of the training data are most likely given a sequence. This provides a potential method for analyzing large amounts of data via model-based source attribution. We have released multiple full-sized, pretrained versions of CTRL at [https://github.com/salesforce/ctrl](https://github.com/salesforce/ctrl).\n\nSentence-BERT: Sentence Embeddings using Siamese BERT-Networks[​](#sentence-bert-sentence-embeddings-using-siamese-bert-networks \"Direct link to Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks\")\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n* **arXiv id:** 1908.10084v1\n \n* **Title:** Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks\n \n* **Authors:** Nils Reimers, Iryna Gurevych\n \n* **Published Date:** 2019-08-27\n \n* **URL:** [http://arxiv.org/abs/1908.10084v1](http://arxiv.org/abs/1908.10084v1)\n \n* **LangChain:**\n \n * **Documentation:** [docs/integrations/text\\_embedding/sentence\\_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)\n\n**Abstract:** BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. 
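These embeddings are available in LangChain through the sentence-transformers backed `HuggingFaceEmbeddings`; a small sketch of cosine-similarity comparison (the model name and sentences are illustrative):

```python
# Sentence embeddings compared with cosine similarity.
import numpy as np
from langchain_huggingface import HuggingFaceEmbeddings

embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
docs = ["The cat sits on the mat.", "A feline rests on a rug.", "Quarterly revenue grew 8%."]
doc_vecs = np.array(embeddings.embed_documents(docs))
query_vec = np.array(embeddings.embed_query("Where is the cat?"))

scores = doc_vecs @ query_vec / (np.linalg.norm(doc_vecs, axis=1) * np.linalg.norm(query_vec))
print(max(zip(scores, docs)))  # the cat sentences score highest
```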
This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/additional_resources/arxiv_references.mdx)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n* [Summary](#summary)\n* [Self-Discover: Large Language Models Self-Compose Reasoning Structures](#self-discover-large-language-models-self-compose-reasoning-structures)\n* [RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval](#raptor-recursive-abstractive-processing-for-tree-organized-retrieval)\n* [Corrective Retrieval Augmented Generation](#corrective-retrieval-augmented-generation)\n* [Mixtral of Experts](#mixtral-of-experts)\n* [Dense X Retrieval: What Retrieval Granularity Should We Use?](#dense-x-retrieval-what-retrieval-granularity-should-we-use)\n* [Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models](#chain-of-note-enhancing-robustness-in-retrieval-augmented-language-models)\n* [Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection](#self-rag-learning-to-retrieve-generate-and-critique-through-self-reflection)\n* [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](#take-a-step-back-evoking-reasoning-via-abstraction-in-large-language-models)\n* [Llama 2: Open Foundation and Fine-Tuned Chat Models](#llama-2-open-foundation-and-fine-tuned-chat-models)\n* [Query Rewriting for Retrieval-Augmented Large Language Models](#query-rewriting-for-retrieval-augmented-large-language-models)\n* [Large Language Model Guided Tree-of-Thought](#large-language-model-guided-tree-of-thought)\n* [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](#plan-and-solve-prompting-improving-zero-shot-chain-of-thought-reasoning-by-large-language-models)\n* [Visual Instruction Tuning](#visual-instruction-tuning)\n* [Generative Agents: Interactive Simulacra of Human Behavior](#generative-agents-interactive-simulacra-of-human-behavior)\n* [CAMEL: Communicative Agents for \"Mind\" Exploration of Large Language Model Society](#camel-communicative-agents-for-mind-exploration-of-large-language-model-society)\n* [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](#hugginggpt-solving-ai-tasks-with-chatgpt-and-its-friends-in-hugging-face)\n* [GPT-4 Technical Report](#gpt-4-technical-report)\n* [A Watermark for Large Language Models](#a-watermark-for-large-language-models)\n* [Precise Zero-Shot Dense Retrieval without Relevance Labels](#precise-zero-shot-dense-retrieval-without-relevance-labels)\n* [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](#robust-and-explainable-identification-of-logical-fallacies-in-natural-language-arguments)\n* [Complementary Explanations for Effective In-Context Learning](#complementary-explanations-for-effective-in-context-learning)\n* [PAL: Program-aided Language Models](#pal-program-aided-language-models)\n* [ReAct: Synergizing Reasoning 
and Acting in Language Models](#react-synergizing-reasoning-and-acting-in-language-models)\n* [Deep Lake: a Lakehouse for Deep Learning](#deep-lake-a-lakehouse-for-deep-learning)\n* [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](#bitext-mining-using-distilled-sentence-representations-for-low-resource-languages)\n* [Evaluating the Text-to-SQL Capabilities of Large Language Models](#evaluating-the-text-to-sql-capabilities-of-large-language-models)\n* [Locally Typical Sampling](#locally-typical-sampling)\n* [Learning Transferable Visual Models From Natural Language Supervision](#learning-transferable-visual-models-from-natural-language-supervision)\n* [CTRL: A Conditional Transformer Language Model for Controllable Generation](#ctrl-a-conditional-transformer-language-model-for-controllable-generation)\n* [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](#sentence-bert-sentence-embeddings-using-siamese-bert-networks)"},"last_modified":{"kind":"null"}}},{"rowIdx":1384,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/merge_message_runs/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to merge consecutive messages of the same type\n\nOn this page\n\nHow to merge consecutive messages of the same type\n==================================================\n\nCertain models do not support passing in consecutive messages of the same type (a.k.a. \"runs\" of the same message type).\n\nThe `merge_message_runs` utility makes it easy to merge consecutive messages of the same type.\n\nBasic usage[​](#basic-usage \"Direct link to Basic usage\")\n---------------------------------------------------------\n\n from langchain_core.messages import ( AIMessage, HumanMessage, SystemMessage, merge_message_runs,)messages = [ SystemMessage(\"you're a good assistant.\"), SystemMessage(\"you always respond with a joke.\"), HumanMessage([{\"type\": \"text\", \"text\": \"i wonder why it's called langchain\"}]), HumanMessage(\"and who is harrison chasing anyways\"), AIMessage( 'Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!' ), AIMessage(\"Why, he's probably chasing after the last cup of coffee in the office!\"),]merged = merge_message_runs(messages)print(\"\\n\\n\".join([repr(x) for x in merged]))\n\n**API Reference:**[AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [SystemMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html) | [merge\\_message\\_runs](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.merge_message_runs.html)\n\n SystemMessage(content=\"you're a good assistant.\\nyou always respond with a joke.\")HumanMessage(content=[{'type': 'text', 'text': \"i wonder why it's called langchain\"}, 'and who is harrison chasing anyways'])AIMessage(content='Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!\\nWhy, he\\'s probably chasing after the last cup of coffee in the office!')\n\nNotice that if the contents of one of the messages to merge is a list of content blocks then the merged message will have a list of content blocks. 
And if both messages to merge have string contents then those are concatenated with a newline character.\n\nChaining[​](#chaining \"Direct link to Chaining\")\n------------------------------------------------\n\n`merge_message_runs` can be used in an imperatively (like above) or declaratively, making it easy to compose with other components in a chain:\n\n # pip install -U langchain-anthropicfrom langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", temperature=0)# Notice we don't pass in messages. This creates# a RunnableLambda that takes messages as inputmerger = merge_message_runs()chain = merger | llmchain.invoke(messages)\n\n**API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html)\n\n AIMessage(content=[], response_metadata={'id': 'msg_01D6R8Naum57q8qBau9vLBUX', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 84, 'output_tokens': 3}}, id='run-ac0c465b-b54f-4b8b-9295-e5951250d653-0', usage_metadata={'input_tokens': 84, 'output_tokens': 3, 'total_tokens': 87})\n\nLooking at the LangSmith trace we can see that before the messages are passed to the model they are merged: [https://smith.langchain.com/public/ab558677-cac9-4c59-9066-1ecce5bcd87c/r](https://smith.langchain.com/public/ab558677-cac9-4c59-9066-1ecce5bcd87c/r)\n\nLooking at just the merger, we can see that it's a Runnable object that can be invoked like all Runnables:\n\n merger.invoke(messages)\n\n [SystemMessage(content=\"you're a good assistant.\\nyou always respond with a joke.\"), HumanMessage(content=[{'type': 'text', 'text': \"i wonder why it's called langchain\"}, 'and who is harrison chasing anyways']), AIMessage(content='Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!\\nWhy, he\\'s probably chasing after the last cup of coffee in the office!')]\n\nAPI reference[​](#api-reference \"Direct link to API reference\")\n---------------------------------------------------------------\n\nFor a complete description of all arguments head to the API reference: [https://api.python.langchain.com/en/latest/messages/langchain\\_core.messages.utils.merge\\_message\\_runs.html](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.merge_message_runs.html)\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/merge_message_runs.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to split Markdown by Headers\n\n](/v0.2/docs/how_to/markdown_header_metadata_splitter/)[\n\nNext\n\nHow to add message history\n\n](/v0.2/docs/how_to/message_history/)\n\n* [Basic usage](#basic-usage)\n* [Chaining](#chaining)\n* [API reference](#api-reference)"},"last_modified":{"kind":"null"}}},{"rowIdx":1385,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/long_context_reorder/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to reorder retrieved results to mitigate the \"lost in the middle\" effect\n\nHow to reorder retrieved results to mitigate the \"lost in the middle\" 
effect\n============================================================================\n\nSubstantial performance degradations in [RAG](/v0.2/docs/tutorials/rag/) applications have been [documented](https://arxiv.org/abs/2307.03172) as the number of retrieved documents grows (e.g., beyond ten). In brief: models are liable to miss relevant information in the middle of long contexts.\n\nBy contrast, queries against vector stores will typically return documents in descending order of relevance (e.g., as measured by cosine similarity of [embeddings](/v0.2/docs/concepts/#embedding-models)).\n\nTo mitigate the [\"lost in the middle\"](https://arxiv.org/abs/2307.03172) effect, you can re-order documents after retrieval such that the most relevant documents are positioned at extrema (e.g., the first and last pieces of context), and the least relevant documents are positioned in the middle. In some cases this can help surface the most relevant information to LLMs.\n\nThe [LongContextReorder](https://api.python.langchain.com/en/latest/document_transformers/langchain_community.document_transformers.long_context_reorder.LongContextReorder.html) document transformer implements this re-ordering procedure. Below we demonstrate an example.\n\n %pip install --upgrade --quiet sentence-transformers langchain-chroma langchain langchain-openai langchain-huggingface > /dev/null\n\nFirst we embed some artificial documents and index them in an (in-memory) [Chroma](/v0.2/docs/integrations/providers/chroma/) vector store. We will use [Hugging Face](/v0.2/docs/integrations/text_embedding/huggingfacehub/) embeddings, but any LangChain vector store or embeddings model will suffice.\n\n from langchain_chroma import Chromafrom langchain_huggingface import HuggingFaceEmbeddings# Get embeddings.embeddings = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")texts = [ \"Basquetball is a great sport.\", \"Fly me to the moon is one of my favourite songs.\", \"The Celtics are my favourite team.\", \"This is a document about the Boston Celtics\", \"I simply love going to the movies\", \"The Boston Celtics won the game by 20 points\", \"This is just a random text.\", \"Elden Ring is one of the best games in the last 15 years.\", \"L. Kornet is one of the best Celtics players.\", \"Larry Bird was an iconic NBA player.\",]# Create a retrieverretriever = Chroma.from_texts(texts, embedding=embeddings).as_retriever( search_kwargs={\"k\": 10})query = \"What can you tell me about the Celtics?\"# Get relevant documents ordered by relevance scoredocs = retriever.invoke(query)docs\n\n**API Reference:**[HuggingFaceEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_huggingface.embeddings.huggingface.HuggingFaceEmbeddings.html)\n\n [Document(page_content='This is a document about the Boston Celtics'), Document(page_content='The Celtics are my favourite team.'), Document(page_content='L. Kornet is one of the best Celtics players.'), Document(page_content='The Boston Celtics won the game by 20 points'), Document(page_content='Larry Bird was an iconic NBA player.'), Document(page_content='Elden Ring is one of the best games in the last 15 years.'), Document(page_content='Basquetball is a great sport.'), Document(page_content='I simply love going to the movies'), Document(page_content='Fly me to the moon is one of my favourite songs.'), Document(page_content='This is just a random text.')]\n\nNote that documents are returned in descending order of relevance to the query. 
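Conceptually, the transformation just weaves the relevance-ranked list so that the most relevant documents end up at the two ends of the context and the least relevant ones fall in the middle. A rough sketch of the idea (a simplified illustration, not necessarily LangChain's exact implementation; `reorder_for_extrema` is a hypothetical helper) looks like this:\n\n    # Weave a best-to-worst ranked list so the strongest items sit at both ends.\n    def reorder_for_extrema(docs_by_relevance):\n        front, back = [], []\n        for i, doc in enumerate(docs_by_relevance):  # i == 0 is the most relevant item\n            if i % 2 == 0:\n                back.append(doc)   # ranks 1, 3, 5, ... fill the back half, best last\n            else:\n                front.append(doc)  # ranks 2, 4, 6, ... fill the front half, best first\n        return front + back[::-1]\n\n    print(reorder_for_extrema([1, 2, 3, 4, 5, 6]))  # [2, 4, 6, 5, 3, 1]\n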
The `LongContextReorder` document transformer will implement the re-ordering described above:\n\n from langchain_community.document_transformers import LongContextReorder# Reorder the documents:# Less relevant document will be at the middle of the list and more# relevant elements at beginning / end.reordering = LongContextReorder()reordered_docs = reordering.transform_documents(docs)# Confirm that the 4 relevant documents are at beginning and end.reordered_docs\n\n**API Reference:**[LongContextReorder](https://api.python.langchain.com/en/latest/document_transformers/langchain_community.document_transformers.long_context_reorder.LongContextReorder.html)\n\n [Document(page_content='The Celtics are my favourite team.'), Document(page_content='The Boston Celtics won the game by 20 points'), Document(page_content='Elden Ring is one of the best games in the last 15 years.'), Document(page_content='I simply love going to the movies'), Document(page_content='This is just a random text.'), Document(page_content='Fly me to the moon is one of my favourite songs.'), Document(page_content='Basquetball is a great sport.'), Document(page_content='Larry Bird was an iconic NBA player.'), Document(page_content='L. Kornet is one of the best Celtics players.'), Document(page_content='This is a document about the Boston Celtics')]\n\nBelow, we show how to incorporate the re-ordered documents into a simple question-answering chain:\n\n from langchain.chains.combine_documents import create_stuff_documents_chainfrom langchain_core.prompts import PromptTemplatefrom langchain_openai import OpenAIllm = OpenAI()prompt_template = \"\"\"Given these texts:-----{context}-----Please answer the following question:{query}\"\"\"prompt = PromptTemplate( template=prompt_template, input_variables=[\"context\", \"query\"],)# Create and invoke the chain:chain = create_stuff_documents_chain(llm, prompt)response = chain.invoke({\"context\": reordered_docs, \"query\": query})print(response)\n\n**API Reference:**[create\\_stuff\\_documents\\_chain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.stuff.create_stuff_documents_chain.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) | [OpenAI](https://api.python.langchain.com/en/latest/llms/langchain_openai.llms.base.OpenAI.html)\n\n The Celtics are a professional basketball team and one of the most iconic franchises in the NBA. They are highly regarded and have a large fan base. The team has had many successful seasons and is often considered one of the top teams in the league. They have a strong history and have produced many great players, such as Larry Bird and L. Kornet. 
The team is based in Boston and is often referred to as the Boston Celtics.\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/long_context_reorder.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to get log probabilities\n\n](/v0.2/docs/how_to/logprobs/)[\n\nNext\n\nHow to split Markdown by Headers\n\n](/v0.2/docs/how_to/markdown_header_metadata_splitter/)"},"last_modified":{"kind":"null"}}},{"rowIdx":1386,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/tutorials/llm_chain/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [Tutorials](/v0.2/docs/tutorials/)\n* Build a Simple LLM Application with LCEL\n\nOn this page\n\nBuild a Simple LLM Application with LCEL\n========================================\n\nIn this quickstart we'll show you how to build a simple LLM application with LangChain. This application will translate text from English into another language. This is a relatively simple LLM application - it's just a single LLM call plus some prompting. Still, this is a great way to get started with LangChain - a lot of features can be built with just some prompting and an LLM call!\n\nAfter reading this tutorial, you'll have a high level overview of:\n\n* Using [language models](/v0.2/docs/concepts/#chat-models)\n \n* Using [PromptTemplates](/v0.2/docs/concepts/#prompt-templates) and [OutputParsers](/v0.2/docs/concepts/#output-parsers)\n \n* Using [LangChain Expression Language (LCEL)](/v0.2/docs/concepts/#langchain-expression-language-lcel) to chain components together\n \n* Debugging and tracing your application using [LangSmith](/v0.2/docs/concepts/#langsmith)\n \n* Deploying your application with [LangServe](/v0.2/docs/concepts/#langserve)\n \n\nLet's dive in!\n\nSetup[​](#setup \"Direct link to Setup\")\n---------------------------------------\n\n### Jupyter Notebook[​](#jupyter-notebook \"Direct link to Jupyter Notebook\")\n\nThis guide (and most of the other guides in the documentation) uses [Jupyter notebooks](https://jupyter.org/) and assumes the reader is as well. Jupyter notebooks are perfect for learning how to work with LLM systems because oftentimes things can go wrong (unexpected output, API down, etc) and going through guides in an interactive environment is a great way to better understand them.\n\nThis and other tutorials are perhaps most conveniently run in a Jupyter notebook. See [here](https://jupyter.org/install) for instructions on how to install.\n\n### Installation[​](#installation \"Direct link to Installation\")\n\nTo install LangChain run:\n\n* Pip\n* Conda\n\n pip install langchain\n\n conda install langchain -c conda-forge\n\nFor more details, see our [Installation guide](/v0.2/docs/how_to/installation/).\n\n### LangSmith[​](#langsmith \"Direct link to LangSmith\")\n\nMany of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. 
The best way to do this is with [LangSmith](https://smith.langchain.com).\n\nAfter you sign up at the link above, make sure to set your environment variables to start logging traces:\n\n export LANGCHAIN_TRACING_V2=\"true\"export LANGCHAIN_API_KEY=\"...\"\n\nOr, if in a notebook, you can set them with:\n\n import getpassimport osos.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()\n\nUsing Language Models[​](#using-language-models \"Direct link to Using Language Models\")\n---------------------------------------------------------------------------------------\n\nFirst up, let's learn how to use a language model by itself. LangChain supports many different language models that you can use interchangably - select the one you want to use below!\n\n* OpenAI\n* Anthropic\n* Azure\n* Google\n* Cohere\n* FireworksAI\n* Groq\n* MistralAI\n* TogetherAI\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"OPENAI_API_KEY\"] = getpass.getpass()from langchain_openai import ChatOpenAImodel = ChatOpenAI(model=\"gpt-4\")\n\n pip install -qU langchain-anthropic\n\n import getpassimport osos.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()from langchain_anthropic import ChatAnthropicmodel = ChatAnthropic(model=\"claude-3-sonnet-20240229\")\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass()from langchain_openai import AzureChatOpenAImodel = AzureChatOpenAI( azure_endpoint=os.environ[\"AZURE_OPENAI_ENDPOINT\"], azure_deployment=os.environ[\"AZURE_OPENAI_DEPLOYMENT_NAME\"], openai_api_version=os.environ[\"AZURE_OPENAI_API_VERSION\"],)\n\n pip install -qU langchain-google-vertexai\n\n import getpassimport osos.environ[\"GOOGLE_API_KEY\"] = getpass.getpass()from langchain_google_vertexai import ChatVertexAImodel = ChatVertexAI(model=\"gemini-pro\")\n\n pip install -qU langchain-cohere\n\n import getpassimport osos.environ[\"COHERE_API_KEY\"] = getpass.getpass()from langchain_cohere import ChatCoheremodel = ChatCohere(model=\"command-r\")\n\n pip install -qU langchain-fireworks\n\n import getpassimport osos.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass()from langchain_fireworks import ChatFireworksmodel = ChatFireworks(model=\"accounts/fireworks/models/mixtral-8x7b-instruct\")\n\n pip install -qU langchain-groq\n\n import getpassimport osos.environ[\"GROQ_API_KEY\"] = getpass.getpass()from langchain_groq import ChatGroqmodel = ChatGroq(model=\"llama3-8b-8192\")\n\n pip install -qU langchain-mistralai\n\n import getpassimport osos.environ[\"MISTRAL_API_KEY\"] = getpass.getpass()from langchain_mistralai import ChatMistralAImodel = ChatMistralAI(model=\"mistral-large-latest\")\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"TOGETHER_API_KEY\"] = getpass.getpass()from langchain_openai import ChatOpenAImodel = ChatOpenAI( base_url=\"https://api.together.xyz/v1\", api_key=os.environ[\"TOGETHER_API_KEY\"], model=\"mistralai/Mixtral-8x7B-Instruct-v0.1\",)\n\nLet's first use the model directly. `ChatModel`s are instances of LangChain \"Runnables\", which means they expose a standard interface for interacting with them. 
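Because they are Runnables, chat models all share the same core methods, such as `invoke`, `stream`, and `batch`. As a quick illustration (a minimal sketch, assuming the `model` object created in one of the tabs above):\n\n    response = model.invoke(\"Hello!\")             # single call, returns an AIMessage\n    for chunk in model.stream(\"Tell me a joke\"):  # stream the response incrementally\n        print(chunk.content, end=\"\")\n    replies = model.batch([\"Hi!\", \"Howdy!\"])      # run a list of inputs, returns a list of AIMessages\n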
To simply call the model, we can pass in a list of messages to the `.invoke` method.\n\n from langchain_core.messages import HumanMessage, SystemMessagemessages = [ SystemMessage(content=\"Translate the following from English into Italian\"), HumanMessage(content=\"hi!\"),]model.invoke(messages)\n\n**API Reference:**[HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [SystemMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html)\n\n AIMessage(content='ciao!', response_metadata={'token_usage': {'completion_tokens': 3, 'prompt_tokens': 20, 'total_tokens': 23}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-fc5d7c88-9615-48ab-a3c7-425232b562c5-0')\n\nIf we've enabled LangSmith, we can see that this run is logged to LangSmith, and can see the [LangSmith trace](https://smith.langchain.com/public/88baa0b2-7c1a-4d09-ba30-a47985dde2ea/r)\n\nOutputParsers[​](#outputparsers \"Direct link to OutputParsers\")\n---------------------------------------------------------------\n\nNotice that the response from the model is an `AIMessage`. This contains a string response along with other metadata about the response. Oftentimes we may just want to work with the string response. We can parse out just this response by using a simple output parser.\n\nWe first import the simple output parser.\n\n from langchain_core.output_parsers import StrOutputParserparser = StrOutputParser()\n\n**API Reference:**[StrOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.string.StrOutputParser.html)\n\nOne way to use it is by itself. For example, we could save the result of the language model call and then pass it to the parser.\n\n result = model.invoke(messages)\n\n parser.invoke(result)\n\n 'Ciao!'\n\nMore commonly, we can \"chain\" the model with this output parser. This means this output parser will get called every time in this chain. This chain takes on the input type of the language model (string or list of messages) and returns the output type of the output parser (string).\n\nWe can easily create the chain using the `|` operator. The `|` operator is used in LangChain to combine two elements together.\n\n chain = model | parser\n\n chain.invoke(messages)\n\n 'Ciao!'\n\nIf we now look at LangSmith, we can see that the chain has two steps: first the language model is called, then the result of that is passed to the output parser. We can see the [LangSmith trace](https://smith.langchain.com/public/f1bdf656-2739-42f7-ac7f-0f1dd712322f/r)\n\nPrompt Templates[​](#prompt-templates \"Direct link to Prompt Templates\")\n------------------------------------------------------------------------\n\nRight now we are passing a list of messages directly into the language model. Where does this list of messages come from? Usually, it is constructed from a combination of user input and application logic. This application logic usually takes the raw user input and transforms it into a list of messages ready to pass to the language model. Common transformations include adding a system message or formatting a template with the user input.\n\nPromptTemplates are a concept in LangChain designed to assist with this transformation. They take in raw user input and return data (a prompt) that is ready to pass into a language model.\n\nLet's create a PromptTemplate here. 
It will take in two user variables:\n\n* `language`: The language to translate text into\n* `text`: The text to translate\n\n from langchain_core.prompts import ChatPromptTemplate\n\n**API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html)\n\nFirst, let's create a string that we will format to be the system message:\n\n system_template = \"Translate the following into {language}:\"\n\nNext, we can create the PromptTemplate. This will be a combination of the `system_template` as well as a simpler template for where to put the text\n\n prompt_template = ChatPromptTemplate.from_messages( [(\"system\", system_template), (\"user\", \"{text}\")])\n\nThe input to this prompt template is a dictionary. We can play around with this prompt template by itself to see what it does\n\n result = prompt_template.invoke({\"language\": \"italian\", \"text\": \"hi\"})result\n\n ChatPromptValue(messages=[SystemMessage(content='Translate the following into italian:'), HumanMessage(content='hi')])\n\nWe can see that it returns a `ChatPromptValue` that consists of two messages. If we want to access the messages directly we do:\n\n result.to_messages()\n\n [SystemMessage(content='Translate the following into italian:'), HumanMessage(content='hi')]\n\nChaining together components with LCEL[​](#chaining-together-components-with-lcel \"Direct link to Chaining together components with LCEL\")\n------------------------------------------------------------------------------------------------------------------------------------------\n\nWe can now combine this with the model and the output parser from above using the pipe (`|`) operator:\n\n chain = prompt_template | model | parser\n\n chain.invoke({\"language\": \"italian\", \"text\": \"hi\"})\n\n 'ciao'\n\nThis is a simple example of using [LangChain Expression Language (LCEL)](/v0.2/docs/concepts/#langchain-expression-language-lcel) to chain together LangChain modules. There are several benefits to this approach, including optimized streaming and tracing support.\n\nIf we take a look at the LangSmith trace, we can see all three components show up in the [LangSmith trace](https://smith.langchain.com/public/bc49bec0-6b13-4726-967f-dbd3448b786d/r).\n\nServing with LangServe[​](#serving-with-langserve \"Direct link to Serving with LangServe\")\n------------------------------------------------------------------------------------------\n\nNow that we've built an application, we need to serve it. That's where LangServe comes in. LangServe helps developers deploy LangChain chains as a REST API. You do not need to use LangServe to use LangChain, but in this guide we'll show how you can deploy your app with LangServe.\n\nWhile the first part of this guide was intended to be run in a Jupyter Notebook or script, we will now move out of that. We will be creating a Python file and then interacting with it from the command line.\n\nInstall with:\n\n pip install \"langserve[all]\"\n\n### Server[​](#server \"Direct link to Server\")\n\nTo create a server for our application we'll make a `serve.py` file. This will contain our logic for serving our application. It consists of three things:\n\n1. The definition of our chain that we just built above\n2. Our FastAPI app\n3. 
A definition of a route from which to serve the chain, which is done with `langserve.add_routes`\n\n #!/usr/bin/env pythonfrom typing import Listfrom fastapi import FastAPIfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.output_parsers import StrOutputParserfrom langchain_openai import ChatOpenAIfrom langserve import add_routes# 1. Create prompt templatesystem_template = \"Translate the following into {language}:\"prompt_template = ChatPromptTemplate.from_messages([ ('system', system_template), ('user', '{text}')])# 2. Create modelmodel = ChatOpenAI()# 3. Create parserparser = StrOutputParser()# 4. Create chainchain = prompt_template | model | parser# 4. App definitionapp = FastAPI( title=\"LangChain Server\", version=\"1.0\", description=\"A simple API server using LangChain's Runnable interfaces\",)# 5. Adding chain routeadd_routes( app, chain, path=\"/chain\",)if __name__ == \"__main__\": import uvicorn uvicorn.run(app, host=\"localhost\", port=8000)\n\n**API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [StrOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.string.StrOutputParser.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)\n\nAnd that's it! If we execute this file:\n\n python serve.py\n\nwe should see our chain being served at [http://localhost:8000](http://localhost:8000).\n\n### Playground[​](#playground \"Direct link to Playground\")\n\nEvery LangServe service comes with a simple [built-in UI](https://github.com/langchain-ai/langserve/blob/main/README.md#playground) for configuring and invoking the application with streaming output and visibility into intermediate steps. Head to [http://localhost:8000/chain/playground/](http://localhost:8000/chain/playground/) to try it out! Pass in the same inputs as before - `{\"language\": \"italian\", \"text\": \"hi\"}` - and it should respond same as before.\n\n### Client[​](#client \"Direct link to Client\")\n\nNow let's set up a client for programmatically interacting with our service. We can easily do this with the `[langserve.RemoteRunnable](/docs/langserve/#client)`. Using this, we can interact with the served chain as if it were running client-side.\n\n from langserve import RemoteRunnableremote_chain = RemoteRunnable(\"http://localhost:8000/chain/\")remote_chain.invoke({\"language\": \"italian\", \"text\": \"hi\"})\n\n 'Ciao'\n\nTo learn more about the many other features of LangServe [head here](/v0.2/docs/langserve/).\n\nConclusion[​](#conclusion \"Direct link to Conclusion\")\n------------------------------------------------------\n\nThat's it! In this tutorial you've learned how to create your first simple LLM application. You've learned how to work with language models, how to parse their outputs, how to create a prompt template, chaining them with LCEL, how to get great observability into chains you create with LangSmith, and how to deploy them with LangServe.\n\nThis just scratches the surface of what you will want to learn to become a proficient AI Engineer. 
Luckily - we've got a lot of other resources!\n\nFor further reading on the core concepts of LangChain, we've got detailed [Conceptual Guides](/v0.2/docs/concepts/).\n\nIf you have more specific questions on these concepts, check out the following sections of the how-to guides:\n\n* [LangChain Expression Language (LCEL)](/v0.2/docs/how_to/#langchain-expression-language-lcel)\n* [Prompt templates](/v0.2/docs/how_to/#prompt-templates)\n* [Chat models](/v0.2/docs/how_to/#chat-models)\n* [Output parsers](/v0.2/docs/how_to/#output-parsers)\n* [LangServe](/v0.2/docs/langserve/)\n\nAnd the LangSmith docs:\n\n* [LangSmith](https://docs.smith.langchain.com)\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/tutorials/llm_chain.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nTutorials\n\n](/v0.2/docs/tutorials/)[\n\nNext\n\nBuild a Query Analysis System\n\n](/v0.2/docs/tutorials/query_analysis/)\n\n* [Setup](#setup)\n * [Jupyter Notebook](#jupyter-notebook)\n * [Installation](#installation)\n * [LangSmith](#langsmith)\n* [Using Language Models](#using-language-models)\n* [OutputParsers](#outputparsers)\n* [Prompt Templates](#prompt-templates)\n* [Chaining together components with LCEL](#chaining-together-components-with-lcel)\n* [Serving with LangServe](#serving-with-langserve)\n * [Server](#server)\n * [Playground](#playground)\n * [Client](#client)\n* [Conclusion](#conclusion)"},"last_modified":{"kind":"null"}}},{"rowIdx":1387,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/additional_resources/youtube/"},"markdown":{"kind":"string","value":"On this page\n\nYouTube videos\n==============\n\n\\[Updated 2024-05-16\\]\n\n### [Official LangChain YouTube channel](https://www.youtube.com/@LangChain)[​](#official-langchain-youtube-channel \"Direct link to official-langchain-youtube-channel\")\n\n### [Tutorials on YouTube](/v0.2/docs/additional_resources/tutorials/#tutorials)[​](#tutorials-on-youtube \"Direct link to tutorials-on-youtube\")\n\nVideos (sorted by views)[​](#videos-sorted-by-views \"Direct link to Videos (sorted by views)\")\n----------------------------------------------------------------------------------------------\n\nOnly videos with 40K+ views:\n\n* [Using `ChatGPT` with YOUR OWN Data. This is magical. 
(LangChain `OpenAI API`)](https://youtu.be/9AXP7tCI9PI)\n* [Chat with Multiple `PDFs` | LangChain App Tutorial in Python (Free LLMs and Embeddings)](https://youtu.be/dXxQ0LR-3Hg?si=pjXKhsHRzn10vOqX)\n* [`Hugging Face` + Langchain in 5 mins | Access 200k+ FREE AI models for your AI apps](https://youtu.be/_j7JEDWuqLE?si=psimQscN3qo2dOa9)\n* [LangChain Crash Course For Beginners | LangChain Tutorial](https://youtu.be/nAmC7SoVLd8?si=qJdvyG5-rnjqfdj1)\n* [Vector Embeddings Tutorial – Code Your Own AI Assistant with GPT-4 API + LangChain + NLP](https://youtu.be/yfHHvmaMkcA?si=UBP3yw50cLm3a2nj)\n* [Development with Large Language Models Tutorial – `OpenAI`, Langchain, Agents, `Chroma`](https://youtu.be/xZDB1naRUlk?si=v8J1q6oFHRyTkf7Y)\n* [Langchain: `PDF` Chat App (GUI) | ChatGPT for Your PDF FILES | Step-by-Step Tutorial](https://youtu.be/RIWbalZ7sTo?si=LbKsCcuyv0BtnrTY)\n* [Vector Search `RAG` Tutorial – Combine Your Data with LLMs with Advanced Search](https://youtu.be/JEBDfGqrAUA?si=pD7oxpfwWeJCxfBt)\n* [LangChain Crash Course for Beginners](https://youtu.be/lG7Uxts9SXs?si=Yte4S5afN7KNCw0F)\n* [Learn `RAG` From Scratch – Python AI Tutorial from a LangChain Engineer](https://youtu.be/sVcwVQRHIc8?si=_LN4g0vOgSdtlB3S)\n* [`Llama 2` in LangChain — FIRST Open Source Conversational Agent!](https://youtu.be/6iHVJyX2e50?si=rtq1maPrzWKHbwVV)\n* [LangChain Tutorial for Beginners | Generative AI Series](https://youtu.be/cQUUkZnyoD0?si=KYz-bvcocdqGh9f_)\n* [Chatbots with `RAG`: LangChain Full Walkthrough](https://youtu.be/LhnCsygAvzY?si=yS7T98VLfcWdkDek)\n* [LangChain Explained In 15 Minutes - A MUST Learn For Python Programmers](https://youtu.be/mrjq3lFz23s?si=wkQGcSKUJjuiiEPf)\n* [LLM Project | End to End LLM Project Using Langchain, `OpenAI` in Finance Domain](https://youtu.be/MoqgmWV1fm8?si=oVl-5kJVgd3a07Y_)\n* [What is LangChain?](https://youtu.be/1bUy-1hGZpI?si=NZ0D51VM5y-DhjGe)\n* [`RAG` + Langchain Python Project: Easy AI/Chat For Your Doc](https://youtu.be/tcqEUSNCn8I?si=RLcWPBVLIErRqdmU)\n* [Getting Started With LangChain In 20 Minutes- Build Celebrity Search Application](https://youtu.be/_FpT1cwcSLg?si=X9qVazlXYucN_JBP)\n* [LangChain GEN AI Tutorial – 6 End-to-End Projects using OpenAI, Google `Gemini Pro`, `LLAMA2`](https://youtu.be/x0AnCE9SE4A?si=_92gJYm7kb-V2bi0)\n* [Complete Langchain GEN AI Crash Course With 6 End To End LLM Projects With OPENAI, `LLAMA2`, `Gemini Pro`](https://youtu.be/aWKrL4z5H6w?si=NVLi7Yiq0ccE7xXE)\n* [AI Leader Reveals The Future of AI AGENTS (LangChain CEO)](https://youtu.be/9ZhbA0FHZYc?si=1r4P6kRvKVvEhRgE)\n* [Learn How To Query Pdf using Langchain Open AI in 5 min](https://youtu.be/5Ghv-F1wF_0?si=ZZRjrWfeiFOVrcvu)\n* [Reliable, fully local RAG agents with `LLaMA3`](https://youtu.be/-ROS6gfYIts?si=75CXA8W_BbnkIxcV)\n* [Learn `LangChain.js` - Build LLM apps with JavaScript and `OpenAI`](https://youtu.be/HSZ_uaif57o?si=Icj-RAhwMT-vHaYA)\n* [LLM Project | End to End LLM Project Using LangChain, Google Palm In Ed-Tech Industry](https://youtu.be/AjQPRomyd-k?si=eC3NT6kn02Lhpz-_)\n* [Chatbot Answering from Your Own Knowledge Base: Langchain, `ChatGPT`, `Pinecone`, and `Streamlit`: | Code](https://youtu.be/nAKhxQ3hcMA?si=9Zd_Nd_jiYhtml5w)\n* [LangChain is AMAZING | Quick Python Tutorial](https://youtu.be/I4mFqyqFkxg?si=aJ66qh558OfNAczD)\n* [`GirlfriendGPT` - AI girlfriend with LangChain](https://youtu.be/LiN3D1QZGQw?si=kZR-lnJwixeVrjmh)\n* [Using NEW `MPT-7B` in `Hugging Face` and LangChain](https://youtu.be/DXpk9K7DgMo?si=99JDpV_ueimwJhMi)\n* [LangChain - COMPLETE 
TUTORIAL - Basics to advanced concept!](https://youtu.be/a89vqgK-Qcs?si=0aVO2EOqsw7GE5e3)\n* [LangChain Agents: Simply Explained!](https://youtu.be/Xi9Ui-9qcPw?si=DCuG7nGx8dxcfhkx)\n* [Chat With Multiple `PDF` Documents With Langchain And Google `Gemini Pro`](https://youtu.be/uus5eLz6smA?si=YUwvHtaZsGeIl0WD)\n* [LLM Project | End to end LLM project Using Langchain, `Google Palm` in Retail Industry](https://youtu.be/4wtrl4hnPT8?si=_eOKPpdLfWu5UXMQ)\n* [Tutorial | Chat with any Website using Python and Langchain](https://youtu.be/bupx08ZgSFg?si=KRrjYZFnuLsstGwW)\n* [Prompt Engineering And LLM's With LangChain In One Shot-Generative AI](https://youtu.be/t2bSApmPzU4?si=87vPQQtYEWTyu2Kx)\n* [Build a Custom Chatbot with `OpenAI`: `GPT-Index` & LangChain | Step-by-Step Tutorial](https://youtu.be/FIDv6nc4CgU?si=gR1u3DUG9lvzBIKK)\n* [Search Your `PDF` App using Langchain, `ChromaDB`, and Open Source LLM: No OpenAI API (Runs on CPU)](https://youtu.be/rIV1EseKwU4?si=UxZEoXSiPai8fXgl)\n* [Building a `RAG` application from scratch using Python, LangChain, and the `OpenAI API`](https://youtu.be/BrsocJb-fAo?si=hvkh9iTGzJ-LnsX-)\n* [Function Calling via `ChatGPT API` - First Look With LangChain](https://youtu.be/0-zlUy7VUjg?si=Vc6LFseckEc6qvuk)\n* [Private GPT, free deployment! Langchain-Chachat helps you easily play with major mainstream AI models! | Zero Degree Commentary](https://youtu.be/3LLUyaHP-3I?si=AZumEeFXsvqaLl0f)\n* [Create a ChatGPT clone using `Streamlit` and LangChain](https://youtu.be/IaTiyQ2oYUQ?si=WbgsYmqPDnMidSUK)\n* [What's next for AI agents ft. LangChain's Harrison Chase](https://youtu.be/pBBe1pk8hf4?si=H4vdBF9nmkNZxiHt)\n* [`LangFlow`: Build Chatbots without Writing Code - LangChain](https://youtu.be/KJ-ux3hre4s?si=TJuDu4bAlva1myNL)\n* [Building a LangChain Custom Medical Agent with Memory](https://youtu.be/6UFtRwWnHws?si=wymYad26VgigRkHy)\n* [`Ollama` meets LangChain](https://youtu.be/k_1pOF1mj8k?si=RlBiCrmaR3s7SnMK)\n* [End To End LLM Langchain Project using `Pinecone` Vector Database](https://youtu.be/erUfLIi9OFM?si=aHpuHXdIEmAfS4eF)\n* [`LLaMA2` with LangChain - Basics | LangChain TUTORIAL](https://youtu.be/cIRzwSXB4Rc?si=FUs0OLVJpzKhut0h)\n* [Understanding `ReACT` with LangChain](https://youtu.be/Eug2clsLtFs?si=imgj534ggxlypS0d)\n\n* * *\n\n\\[Updated 2024-05-16\\]\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/additional_resources/youtube.mdx)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n* [Official LangChain YouTube channel](#official-langchain-youtube-channel)\n* [Tutorials on YouTube](#tutorials-on-youtube)\n* [Videos (sorted by views)](#videos-sorted-by-views)"},"last_modified":{"kind":"null"}}},{"rowIdx":1388,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/markdown_header_metadata_splitter/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to split Markdown by Headers\n\nOn this page\n\nHow to split Markdown by Headers\n================================\n\n### Motivation[​](#motivation \"Direct link to Motivation\")\n\nMany chat or Q+A applications involve chunking input documents prior to embedding and vector storage.\n\n[These 
notes](https://www.pinecone.io/learn/chunking-strategies/) from Pinecone provide some useful tips:\n\n When a full paragraph or document is embedded, the embedding process considers both the overall context and the relationships between the sentences and phrases within the text. This can result in a more comprehensive vector representation that captures the broader meaning and themes of the text.\n\nAs mentioned, chunking often aims to keep text with common context together. With this in mind, we might want to specifically honor the structure of the document itself. For example, a markdown file is organized by headers. Creating chunks within specific header groups is an intuitive idea. To address this challenge, we can use [MarkdownHeaderTextSplitter](https://api.python.langchain.com/en/latest/markdown/langchain_text_splitters.markdown.MarkdownHeaderTextSplitter.html). This will split a markdown file by a specified set of headers.\n\nFor example, if we want to split this markdown:\n\n md = '# Foo\\n\\n ## Bar\\n\\nHi this is Jim \\nHi this is Joe\\n\\n ## Baz\\n\\n Hi this is Molly' \n\nWe can specify the headers to split on:\n\n [(\"#\", \"Header 1\"),(\"##\", \"Header 2\")]\n\nAnd content is grouped or split by common headers:\n\n {'content': 'Hi this is Jim \\nHi this is Joe', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Bar'}}{'content': 'Hi this is Molly', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Baz'}}\n\nLet's have a look at some examples below.\n\n### Basic usage:[​](#basic-usage \"Direct link to Basic usage:\")\n\n %pip install -qU langchain-text-splitters\n\n from langchain_text_splitters import MarkdownHeaderTextSplitter\n\n**API Reference:**[MarkdownHeaderTextSplitter](https://api.python.langchain.com/en/latest/markdown/langchain_text_splitters.markdown.MarkdownHeaderTextSplitter.html)\n\n markdown_document = \"# Foo\\n\\n ## Bar\\n\\nHi this is Jim\\n\\nHi this is Joe\\n\\n ### Boo \\n\\n Hi this is Lance \\n\\n ## Baz\\n\\n Hi this is Molly\"headers_to_split_on = [ (\"#\", \"Header 1\"), (\"##\", \"Header 2\"), (\"###\", \"Header 3\"),]markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on)md_header_splits = markdown_splitter.split_text(markdown_document)md_header_splits\n\n [Document(page_content='Hi this is Jim \\nHi this is Joe', metadata={'Header 1': 'Foo', 'Header 2': 'Bar'}), Document(page_content='Hi this is Lance', metadata={'Header 1': 'Foo', 'Header 2': 'Bar', 'Header 3': 'Boo'}), Document(page_content='Hi this is Molly', metadata={'Header 1': 'Foo', 'Header 2': 'Baz'})]\n\n type(md_header_splits[0])\n\n langchain_core.documents.base.Document\n\nBy default, `MarkdownHeaderTextSplitter` strips headers being split on from the output chunk's content. 
This can be disabled by setting `strip_headers = False`.\n\n markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on, strip_headers=False)md_header_splits = markdown_splitter.split_text(markdown_document)md_header_splits\n\n [Document(page_content='# Foo \\n## Bar \\nHi this is Jim \\nHi this is Joe', metadata={'Header 1': 'Foo', 'Header 2': 'Bar'}), Document(page_content='### Boo \\nHi this is Lance', metadata={'Header 1': 'Foo', 'Header 2': 'Bar', 'Header 3': 'Boo'}), Document(page_content='## Baz \\nHi this is Molly', metadata={'Header 1': 'Foo', 'Header 2': 'Baz'})]\n\n### How to return Markdown lines as separate documents[​](#how-to-return-markdown-lines-as-separate-documents \"Direct link to How to return Markdown lines as separate documents\")\n\nBy default, `MarkdownHeaderTextSplitter` aggregates lines based on the headers specified in `headers_to_split_on`. We can disable this by specifying `return_each_line`:\n\n markdown_splitter = MarkdownHeaderTextSplitter( headers_to_split_on, return_each_line=True,)md_header_splits = markdown_splitter.split_text(markdown_document)md_header_splits\n\n [Document(page_content='Hi this is Jim', metadata={'Header 1': 'Foo', 'Header 2': 'Bar'}), Document(page_content='Hi this is Joe', metadata={'Header 1': 'Foo', 'Header 2': 'Bar'}), Document(page_content='Hi this is Lance', metadata={'Header 1': 'Foo', 'Header 2': 'Bar', 'Header 3': 'Boo'}), Document(page_content='Hi this is Molly', metadata={'Header 1': 'Foo', 'Header 2': 'Baz'})]\n\nNote that here header information is retained in the `metadata` for each document.\n\n### How to constrain chunk size:[​](#how-to-constrain-chunk-size \"Direct link to How to constrain chunk size:\")\n\nWithin each markdown group we can then apply any text splitter we want, such as `RecursiveCharacterTextSplitter`, which allows for further control of the chunk size.\n\n markdown_document = \"# Intro \\n\\n ## History \\n\\n Markdown[9] is a lightweight markup language for creating formatted text using a plain-text editor. John Gruber created Markdown in 2004 as a markup language that is appealing to human readers in its source code form.[9] \\n\\n Markdown is widely used in blogging, instant messaging, online forums, collaborative software, documentation pages, and readme files. \\n\\n ## Rise and divergence \\n\\n As Markdown popularity grew rapidly, many Markdown implementations appeared, driven mostly by the need for \\n\\n additional features such as tables, footnotes, definition lists,[note 1] and Markdown inside HTML blocks. \\n\\n #### Standardization \\n\\n From 2012, a group of people, including Jeff Atwood and John MacFarlane, launched what Atwood characterised as a standardisation effort. 
\\n\\n ## Implementations \\n\\n Implementations of Markdown are available for over a dozen programming languages.\"headers_to_split_on = [ (\"#\", \"Header 1\"), (\"##\", \"Header 2\"),]# MD splitsmarkdown_splitter = MarkdownHeaderTextSplitter( headers_to_split_on=headers_to_split_on, strip_headers=False)md_header_splits = markdown_splitter.split_text(markdown_document)# Char-level splitsfrom langchain_text_splitters import RecursiveCharacterTextSplitterchunk_size = 250chunk_overlap = 30text_splitter = RecursiveCharacterTextSplitter( chunk_size=chunk_size, chunk_overlap=chunk_overlap)# Splitsplits = text_splitter.split_documents(md_header_splits)splits\n\n**API Reference:**[RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html)\n\n [Document(page_content='# Intro \\n## History \\nMarkdown[9] is a lightweight markup language for creating formatted text using a plain-text editor. John Gruber created Markdown in 2004 as a markup language that is appealing to human readers in its source code form.[9]', metadata={'Header 1': 'Intro', 'Header 2': 'History'}), Document(page_content='Markdown is widely used in blogging, instant messaging, online forums, collaborative software, documentation pages, and readme files.', metadata={'Header 1': 'Intro', 'Header 2': 'History'}), Document(page_content='## Rise and divergence \\nAs Markdown popularity grew rapidly, many Markdown implementations appeared, driven mostly by the need for \\nadditional features such as tables, footnotes, definition lists,[note 1] and Markdown inside HTML blocks.', metadata={'Header 1': 'Intro', 'Header 2': 'Rise and divergence'}), Document(page_content='#### Standardization \\nFrom 2012, a group of people, including Jeff Atwood and John MacFarlane, launched what Atwood characterised as a standardisation effort.', metadata={'Header 1': 'Intro', 'Header 2': 'Rise and divergence'}), Document(page_content='## Implementations \\nImplementations of Markdown are available for over a dozen programming languages.', metadata={'Header 1': 'Intro', 'Header 2': 'Implementations'})]\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/markdown_header_metadata_splitter.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to reorder retrieved results to mitigate the \"lost in the middle\" effect\n\n](/v0.2/docs/how_to/long_context_reorder/)[\n\nNext\n\nHow to merge consecutive messages of the same type\n\n](/v0.2/docs/how_to/merge_message_runs/)\n\n* [Motivation](#motivation)\n* [Basic usage:](#basic-usage)\n* [How to return Markdown lines as separate documents](#how-to-return-markdown-lines-as-separate-documents)\n* [How to constrain chunk size:](#how-to-constrain-chunk-size)"},"last_modified":{"kind":"null"}}},{"rowIdx":1389,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/message_history/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to add message history\n\nOn this page\n\nHow to add message history\n==========================\n\nPrerequisites\n\nThis guide assumes familiarity with the following concepts:\n\n* [LangChain 
Expression Language (LCEL)](/v0.2/docs/concepts/#langchain-expression-language)\n* [Chaining runnables](/v0.2/docs/how_to/sequence/)\n* [Configuring chain parameters at runtime](/v0.2/docs/how_to/configure/)\n* [Prompt templates](/v0.2/docs/concepts/#prompt-templates)\n* [Chat Messages](/v0.2/docs/concepts/#message-types)\n\nPassing conversation state into and out a chain is vital when building a chatbot. The [`RunnableWithMessageHistory`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html#langchain_core.runnables.history.RunnableWithMessageHistory) class lets us add message history to certain types of chains. It wraps another Runnable and manages the chat message history for it. Specifically, it loads previous messages in the conversation BEFORE passing it to the Runnable, and it saves the generated response as a message AFTER calling the runnable. This class also enables multiple conversations by saving each conversation with a `session_id` - it then expects a `session_id` to be passed in the config when calling the runnable, and uses that to look up the relevant conversation history.\n\n![index_diagram](/v0.2/assets/images/message_history-4c13b8b9363beb4621d605bf6b5a34b4.png)\n\nIn practice this looks something like:\n\n from langchain_core.runnables.history import RunnableWithMessageHistorywith_message_history = RunnableWithMessageHistory( # The underlying runnable runnable, # A function that takes in a session id and returns a memory object get_session_history, # Other parameters that may be needed to align the inputs/outputs # of the Runnable with the memory object ... )with_message_history.invoke( # The same input as before {\"ability\": \"math\", \"input\": \"What does cosine mean?\"}, # Configuration specifying the `session_id`, # which controls which conversation to load config={\"configurable\": {\"session_id\": \"abc123\"}},)\n\n**API Reference:**[RunnableWithMessageHistory](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html)\n\nIn order to properly set this up there are two main things to consider:\n\n1. How to store and load messages? (this is `get_session_history` in the example above)\n2. What is the underlying Runnable you are wrapping and what are its inputs/outputs? (this is `runnable` in the example above, as well any additional parameters you pass to `RunnableWithMessageHistory` to align the inputs/outputs)\n\nLet's walk through these pieces (and more) below.\n\nHow to store and load messages[​](#how-to-store-and-load-messages \"Direct link to How to store and load messages\")\n------------------------------------------------------------------------------------------------------------------\n\nA key part of this is storing and loading messages. When constructing `RunnableWithMessageHistory` you need to pass in a `get_session_history` function. This function should take in a `session_id` and return a `BaseChatMessageHistory` object.\n\n**What is `session_id`?**\n\n`session_id` is an identifier for the session (conversation) thread that these input messages correspond to. This allows you to maintain several conversations/threads with the same chain at the same time.\n\n**What is `BaseChatMessageHistory`?**\n\n`BaseChatMessageHistory` is a class that can load and save message objects. It will be called by `RunnableWithMessageHistory` to do exactly that. 
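For instance, a minimal in-memory implementation of `get_session_history` (just a sketch: it keeps one history object per session in a plain dict and forgets everything when the process exits; the `store` dict is illustrative) could look like this:\n\n    from langchain_core.chat_history import BaseChatMessageHistory\n    from langchain_community.chat_message_histories import ChatMessageHistory\n\n    store = {}  # maps a session_id to its ChatMessageHistory\n\n    def get_session_history(session_id: str) -> BaseChatMessageHistory:\n        # Create the history for a new session on first use, then reuse it.\n        if session_id not in store:\n            store[session_id] = ChatMessageHistory()\n        return store[session_id]\n\nIn-memory histories like this are fine for experimentation; persistent implementations are usually constructed per session instead, which is what the SQLite-backed example below does. 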
These classes are usually initialized with a session id.\n\nLet's create a `get_session_history` object to use for this example. To keep things simple, we will use a simple SQLiteMessage\n\n ! rm memory.db\n\n from langchain_community.chat_message_histories import SQLChatMessageHistorydef get_session_history(session_id): return SQLChatMessageHistory(session_id, \"sqlite:///memory.db\")\n\n**API Reference:**[SQLChatMessageHistory](https://api.python.langchain.com/en/latest/chat_message_histories/langchain_community.chat_message_histories.sql.SQLChatMessageHistory.html)\n\nCheck out the [memory integrations](https://integrations.langchain.com/memory) page for implementations of chat message histories using other providers (Redis, Postgres, etc).\n\nWhat is the runnable you are trying to wrap?[​](#what-is-the-runnable-you-are-trying-to-wrap \"Direct link to What is the runnable you are trying to wrap?\")\n-----------------------------------------------------------------------------------------------------------------------------------------------------------\n\n`RunnableWithMessageHistory` can only wrap certain types of Runnables. Specifically, it can be used for any Runnable that takes as input one of:\n\n* a sequence of [`BaseMessages`](/v0.2/docs/concepts/#message-types)\n* a dict with a key that takes a sequence of `BaseMessages`\n* a dict with a key that takes the latest message(s) as a string or sequence of `BaseMessages`, and a separate key that takes historical messages\n\nAnd returns as output one of\n\n* a string that can be treated as the contents of an `AIMessage`\n* a sequence of `BaseMessage`\n* a dict with a key that contains a sequence of `BaseMessage`\n\nLet's take a look at some examples to see how it works.\n\n### Setup[​](#setup \"Direct link to Setup\")\n\nFirst we construct a runnable (which here accepts a dict as input and returns a message as output):\n\n* OpenAI\n* Anthropic\n* Azure\n* Google\n* Cohere\n* FireworksAI\n* Groq\n* MistralAI\n* TogetherAI\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"OPENAI_API_KEY\"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n\n pip install -qU langchain-anthropic\n\n import getpassimport osos.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()from langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass()from langchain_openai import AzureChatOpenAIllm = AzureChatOpenAI( azure_endpoint=os.environ[\"AZURE_OPENAI_ENDPOINT\"], azure_deployment=os.environ[\"AZURE_OPENAI_DEPLOYMENT_NAME\"], openai_api_version=os.environ[\"AZURE_OPENAI_API_VERSION\"],)\n\n pip install -qU langchain-google-vertexai\n\n import getpassimport osos.environ[\"GOOGLE_API_KEY\"] = getpass.getpass()from langchain_google_vertexai import ChatVertexAIllm = ChatVertexAI(model=\"gemini-pro\")\n\n pip install -qU langchain-cohere\n\n import getpassimport osos.environ[\"COHERE_API_KEY\"] = getpass.getpass()from langchain_cohere import ChatCoherellm = ChatCohere(model=\"command-r\")\n\n pip install -qU langchain-fireworks\n\n import getpassimport osos.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass()from langchain_fireworks import ChatFireworksllm = ChatFireworks(model=\"accounts/fireworks/models/mixtral-8x7b-instruct\")\n\n pip install -qU langchain-groq\n\n import getpassimport osos.environ[\"GROQ_API_KEY\"] = 
getpass.getpass()from langchain_groq import ChatGroqllm = ChatGroq(model=\"llama3-8b-8192\")\n\n pip install -qU langchain-mistralai\n\n import getpassimport osos.environ[\"MISTRAL_API_KEY\"] = getpass.getpass()from langchain_mistralai import ChatMistralAIllm = ChatMistralAI(model=\"mistral-large-latest\")\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"TOGETHER_API_KEY\"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI( base_url=\"https://api.together.xyz/v1\", api_key=os.environ[\"TOGETHER_API_KEY\"], model=\"mistralai/Mixtral-8x7B-Instruct-v0.1\",)\n\n from langchain_core.messages import HumanMessagefrom langchain_core.runnables.history import RunnableWithMessageHistory\n\n**API Reference:**[HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [RunnableWithMessageHistory](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html)\n\n### Messages input, message(s) output[​](#messages-input-messages-output \"Direct link to Messages input, message(s) output\")\n\nThe simplest form is just adding memory to a ChatModel. ChatModels accept a list of messages as input and output a message. This makes it very easy to use `RunnableWithMessageHistory` - no additional configuration is needed!\n\n runnable_with_history = RunnableWithMessageHistory( model, get_session_history,)\n\n runnable_with_history.invoke( [HumanMessage(content=\"hi - im bob!\")], config={\"configurable\": {\"session_id\": \"1\"}},)\n\n AIMessage(content=\"It's nice to meet you, Bob! I'm Claude, an AI assistant created by Anthropic. How can I help you today?\", response_metadata={'id': 'msg_01UHCCMiZz9yNYjt41xUJrtk', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 32}}, id='run-55f6a451-606b-4e04-9e39-e03b81035c1f-0', usage_metadata={'input_tokens': 12, 'output_tokens': 32, 'total_tokens': 44})\n\n runnable_with_history.invoke( [HumanMessage(content=\"whats my name?\")], config={\"configurable\": {\"session_id\": \"1\"}},)\n\n AIMessage(content='I\\'m afraid I don\\'t actually know your name - you introduced yourself as Bob, but I don\\'t have any other information about your identity. As an AI assistant, I don\\'t have a way to independently verify people\\'s names or identities. I\\'m happy to continue our conversation, but I\\'ll just refer to you as \"Bob\" since that\\'s the name you provided.', response_metadata={'id': 'msg_018L96tAxiexMKsHBQz22CcE', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 52, 'output_tokens': 80}}, id='run-7399ddb5-bb06-444b-bfb2-2f65674105dd-0', usage_metadata={'input_tokens': 52, 'output_tokens': 80, 'total_tokens': 132})\n\ninfo\n\nNote that in this case the context is preserved via the chat history for the provided `session_id`, so the model knows the users name.\n\nWe can now try this with a new session id and see that it does not remember.\n\n runnable_with_history.invoke( [HumanMessage(content=\"whats my name?\")], config={\"configurable\": {\"session_id\": \"1a\"}},)\n\n AIMessage(content=\"I'm afraid I don't actually know your name. 
As an AI assistant, I don't have personal information about you unless you provide it to me directly.\", response_metadata={'id': 'msg_01LhbWu7mSKTvKAx7iQpMPzd', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 35}}, id='run-cf86cad2-21f2-4525-afc8-09bfd1e8af70-0', usage_metadata={'input_tokens': 12, 'output_tokens': 35, 'total_tokens': 47})\n\ninfo\n\nWhen we pass a different `session_id`, we start a new chat history, so the model does not know what the user's name is.\n\n### Dictionary input, message(s) output[​](#dictionary-input-messages-output \"Direct link to Dictionary input, message(s) output\")\n\nBesides just wrapping a raw model, the next step up is wrapping a prompt + LLM. This now changes the input to be a **dictionary** (because the input to a prompt is a dictionary). This adds two bits of complication.\n\nFirst: a dictionary can have multiple keys, but we only want to save ONE as input. In order to do this, we now need to specify a key to save as the input.\n\nSecond: once we load the messages, we need to know how to save them to the dictionary. That equates to knowing which key in the dictionary to save them in. Therefore, we need to specify a key to save the loaded messages in.\n\nPutting it all together, that ends up looking something like:\n\n from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholderprompt = ChatPromptTemplate.from_messages( [ ( \"system\", \"You're an assistant who speaks in {language}. Respond in 20 words or fewer\", ), MessagesPlaceholder(variable_name=\"history\"), (\"human\", \"{input}\"), ])runnable = prompt | modelrunnable_with_history = RunnableWithMessageHistory( runnable, get_session_history, input_messages_key=\"input\", history_messages_key=\"history\",)\n\n**API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [MessagesPlaceholder](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.MessagesPlaceholder.html)\n\ninfo\n\nNote that we've specified `input_messages_key` (the key to be treated as the latest input message) and `history_messages_key` (the key to add historical messages to).\n\n runnable_with_history.invoke( {\"language\": \"italian\", \"input\": \"hi im bob!\"}, config={\"configurable\": {\"session_id\": \"2\"}},)\n\n AIMessage(content='Ciao Bob! È un piacere conoscerti. 
Come stai oggi?', response_metadata={'id': 'msg_0121ADUEe4G1hMC6zbqFWofr', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 29, 'output_tokens': 23}}, id='run-246a70df-aad6-43d6-a7e8-166d96e0d67e-0', usage_metadata={'input_tokens': 29, 'output_tokens': 23, 'total_tokens': 52})\n\n runnable_with_history.invoke( {\"language\": \"italian\", \"input\": \"whats my name?\"}, config={\"configurable\": {\"session_id\": \"2\"}},)\n\n AIMessage(content='Bob, il tuo nome è Bob.', response_metadata={'id': 'msg_01EDUZG6nRLGeti9KhFN5cek', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 60, 'output_tokens': 12}}, id='run-294b4a72-81bc-4c43-b199-3aafdff87cb3-0', usage_metadata={'input_tokens': 60, 'output_tokens': 12, 'total_tokens': 72})\n\ninfo\n\nNote that in this case the context is preserved via the chat history for the provided `session_id`, so the model knows the users name.\n\nWe can now try this with a new session id and see that it does not remember.\n\n runnable_with_history.invoke( {\"language\": \"italian\", \"input\": \"whats my name?\"}, config={\"configurable\": {\"session_id\": \"2a\"}},)\n\n AIMessage(content='Mi dispiace, non so il tuo nome. Come posso aiutarti?', response_metadata={'id': 'msg_01Lyd9FAGQJTxxAZoFi3sQpQ', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 30, 'output_tokens': 23}}, id='run-19a82197-3b1c-4b5f-a68d-f91f4a2ba523-0', usage_metadata={'input_tokens': 30, 'output_tokens': 23, 'total_tokens': 53})\n\ninfo\n\nWhen we pass a different `session_id`, we start a new chat history, so the model does not know what the user's name is.\n\n### Messages input, dict output[​](#messages-input-dict-output \"Direct link to Messages input, dict output\")\n\nThis format is useful when you are using a model to generate one key in a dictionary.\n\n from langchain_core.messages import HumanMessagefrom langchain_core.runnables import RunnableParallelchain = RunnableParallel({\"output_message\": model})runnable_with_history = RunnableWithMessageHistory( chain, get_session_history, output_messages_key=\"output_message\",)\n\n**API Reference:**[HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html)\n\ninfo\n\nNote that we've specified `output_messages_key` (the key to be treated as the output to save).\n\n runnable_with_history.invoke( [HumanMessage(content=\"hi - im bob!\")], config={\"configurable\": {\"session_id\": \"3\"}},)\n\n {'output_message': AIMessage(content=\"It's nice to meet you, Bob! I'm Claude, an AI assistant created by Anthropic. How can I help you today?\", response_metadata={'id': 'msg_01WWJSyUyGGKuBqTs3h18ZMM', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 32}}, id='run-0f50cb43-a734-447c-b535-07c615a0984c-0', usage_metadata={'input_tokens': 12, 'output_tokens': 32, 'total_tokens': 44})}\n\n runnable_with_history.invoke( [HumanMessage(content=\"whats my name?\")], config={\"configurable\": {\"session_id\": \"3\"}},)\n\n {'output_message': AIMessage(content='I\\'m afraid I don\\'t actually know your name - you introduced yourself as Bob, but I don\\'t have any other information about your identity. 
As an AI assistant, I don\\'t have a way to independently verify people\\'s names or identities. I\\'m happy to continue our conversation, but I\\'ll just refer to you as \"Bob\" since that\\'s the name you provided.', response_metadata={'id': 'msg_01TEGrhfLXTwo36rC7svdTy4', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 52, 'output_tokens': 80}}, id='run-178e8f3f-da21-430d-9edc-ef07797a5e2d-0', usage_metadata={'input_tokens': 52, 'output_tokens': 80, 'total_tokens': 132})}\n\ninfo\n\nNote that in this case the context is preserved via the chat history for the provided `session_id`, so the model knows the users name.\n\nWe can now try this with a new session id and see that it does not remember.\n\n runnable_with_history.invoke( [HumanMessage(content=\"whats my name?\")], config={\"configurable\": {\"session_id\": \"3a\"}},)\n\n {'output_message': AIMessage(content=\"I'm afraid I don't actually know your name. As an AI assistant, I don't have personal information about you unless you provide it to me directly.\", response_metadata={'id': 'msg_0118ZBudDXAC9P6smf91NhCX', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 35}}, id='run-deb14a3a-0336-42b4-8ace-ad1e52ca5910-0', usage_metadata={'input_tokens': 12, 'output_tokens': 35, 'total_tokens': 47})}\n\ninfo\n\nWhen we pass a different `session_id`, we start a new chat history, so the model does not know what the user's name is.\n\n### Dict with single key for all messages input, messages output[​](#dict-with-single-key-for-all-messages-input-messages-output \"Direct link to Dict with single key for all messages input, messages output\")\n\nThis is a specific case of \"Dictionary input, message(s) output\". In this situation, because there is only a single key we don't need to specify as much - we only need to specify the `input_messages_key`.\n\n from operator import itemgetterrunnable_with_history = RunnableWithMessageHistory( itemgetter(\"input_messages\") | model, get_session_history, input_messages_key=\"input_messages\",)\n\ninfo\n\nNote that we've specified `input_messages_key` (the key to be treated as the latest input message).\n\n runnable_with_history.invoke( {\"input_messages\": [HumanMessage(content=\"hi - im bob!\")]}, config={\"configurable\": {\"session_id\": \"4\"}},)\n\n AIMessage(content=\"It's nice to meet you, Bob! I'm Claude, an AI assistant created by Anthropic. How can I help you today?\", response_metadata={'id': 'msg_01UdD5wz1J5xwoz5D94onaQC', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 32}}, id='run-91bee6eb-0814-4557-ad71-fef9b0270358-0', usage_metadata={'input_tokens': 12, 'output_tokens': 32, 'total_tokens': 44})\n\n runnable_with_history.invoke( {\"input_messages\": [HumanMessage(content=\"whats my name?\")]}, config={\"configurable\": {\"session_id\": \"4\"}},)\n\n AIMessage(content='I\\'m afraid I don\\'t actually know your name - you introduced yourself as Bob, but I don\\'t have any other information about your identity. As an AI assistant, I don\\'t have a way to independently verify people\\'s names or identities. 
I\\'m happy to continue our conversation, but I\\'ll just refer to you as \"Bob\" since that\\'s the name you provided.', response_metadata={'id': 'msg_012WUygxBKXcVJPeTW14LNrc', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 52, 'output_tokens': 80}}, id='run-fcbaaa1a-8c33-4eec-b0b0-5b800a47bddd-0', usage_metadata={'input_tokens': 52, 'output_tokens': 80, 'total_tokens': 132})\n\ninfo\n\nNote that in this case the context is preserved via the chat history for the provided `session_id`, so the model knows the users name.\n\nWe can now try this with a new session id and see that it does not remember.\n\n runnable_with_history.invoke( {\"input_messages\": [HumanMessage(content=\"whats my name?\")]}, config={\"configurable\": {\"session_id\": \"4a\"}},)\n\n AIMessage(content=\"I'm afraid I don't actually know your name. As an AI assistant, I don't have personal information about you unless you provide it to me directly.\", response_metadata={'id': 'msg_017xW3Ki5y4UBYzCU9Mf1pgM', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 35}}, id='run-d2f372f7-3679-4a5c-9331-a55b820ec03e-0', usage_metadata={'input_tokens': 12, 'output_tokens': 35, 'total_tokens': 47})\n\ninfo\n\nWhen we pass a different `session_id`, we start a new chat history, so the model does not know what the user's name is.\n\nCustomization[​](#customization \"Direct link to Customization\")\n---------------------------------------------------------------\n\nThe configuration parameters by which we track message histories can be customized by passing in a list of `ConfigurableFieldSpec` objects to the `history_factory_config` parameter. Below, we use two parameters: a `user_id` and `conversation_id`.\n\n from langchain_core.runnables import ConfigurableFieldSpecdef get_session_history(user_id: str, conversation_id: str): return SQLChatMessageHistory(f\"{user_id}--{conversation_id}\", \"sqlite:///memory.db\")with_message_history = RunnableWithMessageHistory( runnable, get_session_history, input_messages_key=\"input\", history_messages_key=\"history\", history_factory_config=[ ConfigurableFieldSpec( id=\"user_id\", annotation=str, name=\"User ID\", description=\"Unique identifier for the user.\", default=\"\", is_shared=True, ), ConfigurableFieldSpec( id=\"conversation_id\", annotation=str, name=\"Conversation ID\", description=\"Unique identifier for the conversation.\", default=\"\", is_shared=True, ), ],)with_message_history.invoke( {\"language\": \"italian\", \"input\": \"hi im bob!\"}, config={\"configurable\": {\"user_id\": \"123\", \"conversation_id\": \"1\"}},)\n\n**API Reference:**[ConfigurableFieldSpec](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.utils.ConfigurableFieldSpec.html)\n\n AIMessage(content='Ciao Bob! È un piacere conoscerti. 
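For readability, here is the customization snippet from this section as a formatted sketch. It assumes the `runnable` (prompt piped into the model) from the earlier section and a SQLite-backed `SQLChatMessageHistory` as in the setup; because the history factory now takes two arguments, both are declared via `history_factory_config`.

```python
from langchain_community.chat_message_histories import SQLChatMessageHistory
from langchain_core.runnables import ConfigurableFieldSpec
from langchain_core.runnables.history import RunnableWithMessageHistory


def get_session_history(user_id: str, conversation_id: str):
    # One SQLite-backed history per (user_id, conversation_id) pair.
    return SQLChatMessageHistory(f"{user_id}--{conversation_id}", "sqlite:///memory.db")


with_message_history = RunnableWithMessageHistory(
    runnable,  # the prompt | model chain defined earlier
    get_session_history,
    input_messages_key="input",
    history_messages_key="history",
    history_factory_config=[
        ConfigurableFieldSpec(
            id="user_id",
            annotation=str,
            name="User ID",
            description="Unique identifier for the user.",
            default="",
            is_shared=True,
        ),
        ConfigurableFieldSpec(
            id="conversation_id",
            annotation=str,
            name="Conversation ID",
            description="Unique identifier for the conversation.",
            default="",
            is_shared=True,
        ),
    ],
)

with_message_history.invoke(
    {"language": "italian", "input": "hi im bob!"},
    config={"configurable": {"user_id": "123", "conversation_id": "1"}},
)
```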
Come stai oggi?', response_metadata={'id': 'msg_016RJebCoiAgWaNcbv9wrMNW', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 29, 'output_tokens': 23}}, id='run-40425414-8f72-47d4-bf1d-a84175d8b3f8-0', usage_metadata={'input_tokens': 29, 'output_tokens': 23, 'total_tokens': 52})\n\n # rememberswith_message_history.invoke( {\"language\": \"italian\", \"input\": \"whats my name?\"}, config={\"configurable\": {\"user_id\": \"123\", \"conversation_id\": \"1\"}},)\n\n AIMessage(content='Bob, il tuo nome è Bob.', response_metadata={'id': 'msg_01Kktiy3auFDKESY54KtTWPX', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 60, 'output_tokens': 12}}, id='run-c7768420-3f30-43f5-8834-74b1979630dd-0', usage_metadata={'input_tokens': 60, 'output_tokens': 12, 'total_tokens': 72})\n\n # New user_id --> does not rememberwith_message_history.invoke( {\"language\": \"italian\", \"input\": \"whats my name?\"}, config={\"configurable\": {\"user_id\": \"456\", \"conversation_id\": \"1\"}},)\n\n AIMessage(content='Mi dispiace, non so il tuo nome. Come posso aiutarti?', response_metadata={'id': 'msg_0178FpbpPNioB7kqvyHk7rjD', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 30, 'output_tokens': 23}}, id='run-df1f1768-aab6-4aec-8bba-e33fc9e90b8d-0', usage_metadata={'input_tokens': 30, 'output_tokens': 23, 'total_tokens': 53})\n\nNote that in this case the context was preserved for the same `user_id`, but once we changed it, the new chat history was started, even though the `conversation_id` was the same.\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/message_history.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to merge consecutive messages of the same type\n\n](/v0.2/docs/how_to/merge_message_runs/)[\n\nNext\n\nHow to migrate from legacy LangChain agents to LangGraph\n\n](/v0.2/docs/how_to/migrate_agent/)\n\n* [How to store and load messages](#how-to-store-and-load-messages)\n* [What is the runnable you are trying to wrap?](#what-is-the-runnable-you-are-trying-to-wrap)\n * [Setup](#setup)\n * [Messages input, message(s) output](#messages-input-messages-output)\n * [Dictionary input, message(s) output](#dictionary-input-messages-output)\n * [Messages input, dict output](#messages-input-dict-output)\n * [Dict with single key for all messages input, messages output](#dict-with-single-key-for-all-messages-input-messages-output)\n* [Customization](#customization)"},"last_modified":{"kind":"null"}}},{"rowIdx":1390,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/tutorials/graph/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [Tutorials](/v0.2/docs/tutorials/)\n* Build a Question Answering application over a Graph Database\n\nOn this page\n\nBuild a Question Answering application over a Graph Database\n============================================================\n\nIn this guide we'll go over the basic ways to create a Q&A chain over a graph database. 
These systems will allow us to ask a question about the data in a graph database and get back a natural language answer.\n\n⚠️ Security note ⚠️[​](#️-security-note-️ \"Direct link to ⚠️ Security note ⚠️\")\n-------------------------------------------------------------------------------\n\nBuilding Q&A systems of graph databases requires executing model-generated graph queries. There are inherent risks in doing this. Make sure that your database connection permissions are always scoped as narrowly as possible for your chain/agent's needs. This will mitigate though not eliminate the risks of building a model-driven system. For more on general security best practices, [see here](/v0.2/docs/security/).\n\nArchitecture[​](#architecture \"Direct link to Architecture\")\n------------------------------------------------------------\n\nAt a high-level, the steps of most graph chains are:\n\n1. **Convert question to a graph database query**: Model converts user input to a graph database query (e.g. Cypher).\n2. **Execute graph database query**: Execute the graph database query.\n3. **Answer the question**: Model responds to user input using the query results.\n\n![sql_usecase.png](/v0.2/assets/images/graph_usecase-34d891523e6284bb6230b38c5f8392e5.png)\n\nSetup[​](#setup \"Direct link to Setup\")\n---------------------------------------\n\nFirst, get required packages and set environment variables. In this example, we will be using Neo4j graph database.\n\n %pip install --upgrade --quiet langchain langchain-community langchain-openai neo4j\n\nWe default to OpenAI models in this guide.\n\n import getpassimport osos.environ[\"OPENAI_API_KEY\"] = getpass.getpass()# Uncomment the below to use LangSmith. Not required.# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n\n ········\n\nNext, we need to define Neo4j credentials. Follow [these installation steps](https://neo4j.com/docs/operations-manual/current/installation/) to set up a Neo4j database.\n\n os.environ[\"NEO4J_URI\"] = \"bolt://localhost:7687\"os.environ[\"NEO4J_USERNAME\"] = \"neo4j\"os.environ[\"NEO4J_PASSWORD\"] = \"password\"\n\nThe below example will create a connection with a Neo4j database and will populate it with example data about movies and their actors.\n\n from langchain_community.graphs import Neo4jGraphgraph = Neo4jGraph()# Import movie informationmovies_query = \"\"\"LOAD CSV WITH HEADERS FROM 'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/movies_small.csv'AS rowMERGE (m:Movie {id:row.movieId})SET m.released = date(row.released), m.title = row.title, m.imdbRating = toFloat(row.imdbRating)FOREACH (director in split(row.director, '|') | MERGE (p:Person {name:trim(director)}) MERGE (p)-[:DIRECTED]->(m))FOREACH (actor in split(row.actors, '|') | MERGE (p:Person {name:trim(actor)}) MERGE (p)-[:ACTED_IN]->(m))FOREACH (genre in split(row.genres, '|') | MERGE (g:Genre {name:trim(genre)}) MERGE (m)-[:IN_GENRE]->(g))\"\"\"graph.query(movies_query)\n\n**API Reference:**[Neo4jGraph](https://api.python.langchain.com/en/latest/graphs/langchain_community.graphs.neo4j_graph.Neo4jGraph.html)\n\n []\n\nGraph schema[​](#graph-schema \"Direct link to Graph schema\")\n------------------------------------------------------------\n\nIn order for an LLM to be able to generate a Cypher statement, it needs information about the graph schema. When you instantiate a graph object, it retrieves the information about the graph schema. 
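For reference, the connection and ingestion snippet from the setup above, written out as a formatted sketch (it assumes a local Neo4j instance reachable with the credentials shown):

```python
import os

from langchain_community.graphs import Neo4jGraph

os.environ["NEO4J_URI"] = "bolt://localhost:7687"
os.environ["NEO4J_USERNAME"] = "neo4j"
os.environ["NEO4J_PASSWORD"] = "password"

graph = Neo4jGraph()

# Movie/Person/Genre nodes with DIRECTED, ACTED_IN and IN_GENRE relationships.
movies_query = """
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/movies_small.csv' AS row
MERGE (m:Movie {id: row.movieId})
SET m.released = date(row.released),
    m.title = row.title,
    m.imdbRating = toFloat(row.imdbRating)
FOREACH (director IN split(row.director, '|') |
    MERGE (p:Person {name: trim(director)})
    MERGE (p)-[:DIRECTED]->(m))
FOREACH (actor IN split(row.actors, '|') |
    MERGE (p:Person {name: trim(actor)})
    MERGE (p)-[:ACTED_IN]->(m))
FOREACH (genre IN split(row.genres, '|') |
    MERGE (g:Genre {name: trim(genre)})
    MERGE (m)-[:IN_GENRE]->(g))
"""

graph.query(movies_query)
```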
If you later make any changes to the graph, you can run the `refresh_schema` method to refresh the schema information.\n\n graph.refresh_schema()print(graph.schema)\n\n Node properties are the following:Movie {imdbRating: FLOAT, id: STRING, released: DATE, title: STRING},Person {name: STRING},Genre {name: STRING},Chunk {id: STRING, question: STRING, query: STRING, text: STRING, embedding: LIST}Relationship properties are the following:The relationships are the following:(:Movie)-[:IN_GENRE]->(:Genre),(:Person)-[:DIRECTED]->(:Movie),(:Person)-[:ACTED_IN]->(:Movie)\n\nGreat! We've got a graph database that we can query. Now let's try hooking it up to an LLM.\n\nChain[​](#chain \"Direct link to Chain\")\n---------------------------------------\n\nLet's use a simple chain that takes a question, turns it into a Cypher query, executes the query, and uses the result to answer the original question.\n\n![graph_chain.webp](/v0.2/assets/images/graph_chain-6379941793e0fa985e51e4bda0329403.webp)\n\nLangChain comes with a built-in chain for this workflow that is designed to work with Neo4j: [GraphCypherQAChain](/v0.2/docs/integrations/graphs/neo4j_cypher/)\n\n from langchain.chains import GraphCypherQAChainfrom langchain_openai import ChatOpenAIllm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)chain = GraphCypherQAChain.from_llm(graph=graph, llm=llm, verbose=True)response = chain.invoke({\"query\": \"What was the cast of the Casino?\"})response\n\n**API Reference:**[GraphCypherQAChain](https://api.python.langchain.com/en/latest/chains/langchain_community.chains.graph_qa.cypher.GraphCypherQAChain.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)\n\n \u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0mGenerated Cypher:\u001b[32;1m\u001b[1;3mMATCH (:Movie {title: \"Casino\"})<-[:ACTED_IN]-(actor:Person)RETURN actor.name\u001b[0mFull Context:\u001b[32;1m\u001b[1;3m[{'actor.name': 'Joe Pesci'}, {'actor.name': 'Robert De Niro'}, {'actor.name': 'Sharon Stone'}, {'actor.name': 'James Woods'}]\u001b[0m\u001b[1m> Finished chain.\u001b[0m\n\n {'query': 'What was the cast of the Casino?', 'result': 'The cast of Casino included Joe Pesci, Robert De Niro, Sharon Stone, and James Woods.'}\n\nValidating relationship direction\n=================================\n\nLLMs can struggle with relationship directions in generated Cypher statement. Since the graph schema is predefined, we can validate and optionally correct relationship directions in the generated Cypher statements by using the `validate_cypher` parameter.\n\n chain = GraphCypherQAChain.from_llm( graph=graph, llm=llm, verbose=True, validate_cypher=True)response = chain.invoke({\"query\": \"What was the cast of the Casino?\"})response\n\n \u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0mGenerated Cypher:\u001b[32;1m\u001b[1;3mMATCH (:Movie {title: \"Casino\"})<-[:ACTED_IN]-(actor:Person)RETURN actor.name\u001b[0mFull Context:\u001b[32;1m\u001b[1;3m[{'actor.name': 'Joe Pesci'}, {'actor.name': 'Robert De Niro'}, {'actor.name': 'Sharon Stone'}, {'actor.name': 'James Woods'}]\u001b[0m\u001b[1m> Finished chain.\u001b[0m\n\n {'query': 'What was the cast of the Casino?', 'result': 'The cast of Casino included Joe Pesci, Robert De Niro, Sharon Stone, and James Woods.'}\n\n### Next steps[​](#next-steps \"Direct link to Next steps\")\n\nFor more complex query-generation, we may want to create few-shot prompts or add query-checking steps. 
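The simplest built-in check is the `validate_cypher` flag used above. As a reference, here is that chain construction written out as a formatted sketch (it assumes the `graph` connection created in the setup):

```python
from langchain.chains import GraphCypherQAChain
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)

# validate_cypher=True checks relationship directions in the generated Cypher
# against the known schema and corrects them where possible.
chain = GraphCypherQAChain.from_llm(
    graph=graph,
    llm=llm,
    verbose=True,
    validate_cypher=True,
)

chain.invoke({"query": "What was the cast of the Casino?"})
```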
For advanced techniques like this and more check out:\n\n* [Prompting strategies](/v0.2/docs/how_to/graph_prompting/): Advanced prompt engineering techniques.\n* [Mapping values](/v0.2/docs/how_to/graph_mapping/): Techniques for mapping values from questions to database.\n* [Semantic layer](/v0.2/docs/how_to/graph_semantic/): Techniques for implementing semantic layers.\n* [Constructing graphs](/v0.2/docs/how_to/graph_constructing/): Techniques for constructing knowledge graphs.\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/tutorials/graph.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nTutorials\n\n](/v0.2/docs/tutorials/)[\n\nNext\n\nTutorials\n\n](/v0.2/docs/tutorials/)\n\n* [⚠️ Security note ⚠️](#️-security-note-️)\n* [Architecture](#architecture)\n* [Setup](#setup)\n* [Graph schema](#graph-schema)\n* [Chain](#chain)\n * [Next steps](#next-steps)"},"last_modified":{"kind":"null"}}},{"rowIdx":1391,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/multi_vector/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to retrieve using multiple vectors per document\n\nOn this page\n\nHow to retrieve using multiple vectors per document\n===================================================\n\nIt can often be useful to store multiple vectors per document. There are multiple use cases where this is beneficial. For example, we can embed multiple chunks of a document and associate those embeddings with the parent document, allowing retriever hits on the chunks to return the larger document.\n\nLangChain implements a base [MultiVectorRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.multi_vector.MultiVectorRetriever.html), which simplifies this process. Much of the complexity lies in how to create the multiple vectors per document. This notebook covers some of the common ways to create those vectors and use the `MultiVectorRetriever`.\n\nThe methods to create multiple vectors per document include:\n\n* Smaller chunks: split a document into smaller chunks, and embed those (this is [ParentDocumentRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.parent_document_retriever.ParentDocumentRetriever.html)).\n* Summary: create a summary for each document, embed that along with (or instead of) the document.\n* Hypothetical questions: create hypothetical questions that each document would be appropriate to answer, embed those along with (or instead of) the document.\n\nNote that this also enables another method of adding embeddings - manually. This is useful because you can explicitly add questions or queries that should lead to a document being recovered, giving you more control.\n\nBelow we walk through an example. First we instantiate some documents. 
We will index them in an (in-memory) [Chroma](/v0.2/docs/integrations/providers/chroma/) vector store using [OpenAI](https://python.langchain.com/v0.2/docs/integrations/text_embedding/openai/) embeddings, but any LangChain vector store or embeddings model will suffice.\n\n %pip install --upgrade --quiet langchain-chroma langchain langchain-openai > /dev/null\n\n from langchain.storage import InMemoryByteStorefrom langchain_chroma import Chromafrom langchain_community.document_loaders import TextLoaderfrom langchain_openai import OpenAIEmbeddingsfrom langchain_text_splitters import RecursiveCharacterTextSplitterloaders = [ TextLoader(\"paul_graham_essay.txt\"), TextLoader(\"state_of_the_union.txt\"),]docs = []for loader in loaders: docs.extend(loader.load())text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000)docs = text_splitter.split_documents(docs)# The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"full_documents\", embedding_function=OpenAIEmbeddings())\n\n**API Reference:**[InMemoryByteStore](https://api.python.langchain.com/en/latest/stores/langchain_core.stores.InMemoryByteStore.html) | [TextLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.text.TextLoader.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) | [RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html)\n\nSmaller chunks[​](#smaller-chunks \"Direct link to Smaller chunks\")\n------------------------------------------------------------------\n\nOften times it can be useful to retrieve larger chunks of information, but embed smaller chunks. This allows for embeddings to capture the semantic meaning as closely as possible, but for as much context as possible to be passed downstream. Note that this is what the [ParentDocumentRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.parent_document_retriever.ParentDocumentRetriever.html) does. Here we show what is going on under the hood.\n\nWe will make a distinction between the vector store, which indexes embeddings of the (sub) documents, and the document store, which houses the \"parent\" documents and associates them with an identifier.\n\n import uuidfrom langchain.retrievers.multi_vector import MultiVectorRetriever# The storage layer for the parent documentsstore = InMemoryByteStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, byte_store=store, id_key=id_key,)doc_ids = [str(uuid.uuid4()) for _ in docs]\n\n**API Reference:**[MultiVectorRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.multi_vector.MultiVectorRetriever.html)\n\nWe next generate the \"sub\" documents by splitting the original documents. 
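A formatted sketch of that step, reusing the `docs`, `doc_ids`, `id_key`, and `retriever` defined above:

```python
from langchain_text_splitters import RecursiveCharacterTextSplitter

# Smaller chunks get embedded; each one carries its parent's id in metadata.
child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)

sub_docs = []
for i, doc in enumerate(docs):
    _id = doc_ids[i]
    _sub_docs = child_text_splitter.split_documents([doc])
    for _doc in _sub_docs:
        _doc.metadata[id_key] = _id
    sub_docs.extend(_sub_docs)

# Index the child chunks and register the full parent documents in the docstore.
retriever.vectorstore.add_documents(sub_docs)
retriever.docstore.mset(list(zip(doc_ids, docs)))
```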
Note that we store the document identifier in the `metadata` of the corresponding [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) object.\n\n # The splitter to use to create smaller chunkschild_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)sub_docs = []for i, doc in enumerate(docs): _id = doc_ids[i] _sub_docs = child_text_splitter.split_documents([doc]) for _doc in _sub_docs: _doc.metadata[id_key] = _id sub_docs.extend(_sub_docs)\n\nFinally, we index the documents in our vector store and document store:\n\n retriever.vectorstore.add_documents(sub_docs)retriever.docstore.mset(list(zip(doc_ids, docs)))\n\nThe vector store alone will retrieve small chunks:\n\n retriever.vectorstore.similarity_search(\"justice breyer\")[0]\n\n Document(page_content='Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.', metadata={'doc_id': '064eca46-a4c4-4789-8e3b-583f9597e54f', 'source': 'state_of_the_union.txt'})\n\nWhereas the retriever will return the larger parent document:\n\n len(retriever.invoke(\"justice breyer\")[0].page_content)\n\n 9875\n\nThe default search type the retriever performs on the vector database is a similarity search. LangChain vector stores also support searching via [Max Marginal Relevance](https://api.python.langchain.com/en/latest/vectorstores/langchain_core.vectorstores.VectorStore.html#langchain_core.vectorstores.VectorStore.max_marginal_relevance_search). This can be controlled via the `search_type` parameter of the retriever:\n\n from langchain.retrievers.multi_vector import SearchTyperetriever.search_type = SearchType.mmrlen(retriever.invoke(\"justice breyer\")[0].page_content)\n\n**API Reference:**[SearchType](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.multi_vector.SearchType.html)\n\n 9875\n\nAssociating summaries with a document for retrieval[​](#associating-summaries-with-a-document-for-retrieval \"Direct link to Associating summaries with a document for retrieval\")\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\nA summary may be able to distill more accurately what a chunk is about, leading to better retrieval. 
Here we show how to create summaries, and then embed those.\n\nWe construct a simple [chain](/v0.2/docs/how_to/sequence/) that will receive an input [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) object and generate a summary using a LLM.\n\n* OpenAI\n* Anthropic\n* Azure\n* Google\n* Cohere\n* FireworksAI\n* Groq\n* MistralAI\n* TogetherAI\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"OPENAI_API_KEY\"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n\n pip install -qU langchain-anthropic\n\n import getpassimport osos.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()from langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass()from langchain_openai import AzureChatOpenAIllm = AzureChatOpenAI( azure_endpoint=os.environ[\"AZURE_OPENAI_ENDPOINT\"], azure_deployment=os.environ[\"AZURE_OPENAI_DEPLOYMENT_NAME\"], openai_api_version=os.environ[\"AZURE_OPENAI_API_VERSION\"],)\n\n pip install -qU langchain-google-vertexai\n\n import getpassimport osos.environ[\"GOOGLE_API_KEY\"] = getpass.getpass()from langchain_google_vertexai import ChatVertexAIllm = ChatVertexAI(model=\"gemini-pro\")\n\n pip install -qU langchain-cohere\n\n import getpassimport osos.environ[\"COHERE_API_KEY\"] = getpass.getpass()from langchain_cohere import ChatCoherellm = ChatCohere(model=\"command-r\")\n\n pip install -qU langchain-fireworks\n\n import getpassimport osos.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass()from langchain_fireworks import ChatFireworksllm = ChatFireworks(model=\"accounts/fireworks/models/mixtral-8x7b-instruct\")\n\n pip install -qU langchain-groq\n\n import getpassimport osos.environ[\"GROQ_API_KEY\"] = getpass.getpass()from langchain_groq import ChatGroqllm = ChatGroq(model=\"llama3-8b-8192\")\n\n pip install -qU langchain-mistralai\n\n import getpassimport osos.environ[\"MISTRAL_API_KEY\"] = getpass.getpass()from langchain_mistralai import ChatMistralAIllm = ChatMistralAI(model=\"mistral-large-latest\")\n\n pip install -qU langchain-openai\n\n import getpassimport osos.environ[\"TOGETHER_API_KEY\"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI( base_url=\"https://api.together.xyz/v1\", api_key=os.environ[\"TOGETHER_API_KEY\"], model=\"mistralai/Mixtral-8x7B-Instruct-v0.1\",)\n\n import uuidfrom langchain_core.documents import Documentfrom langchain_core.output_parsers import StrOutputParserfrom langchain_core.prompts import ChatPromptTemplatechain = ( {\"doc\": lambda x: x.page_content} | ChatPromptTemplate.from_template(\"Summarize the following document:\\n\\n{doc}\") | llm | StrOutputParser())\n\n**API Reference:**[Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) | [StrOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.string.StrOutputParser.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html)\n\nNote that we can [batch](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) the chain accross documents:\n\n summaries = chain.batch(docs, {\"max_concurrency\": 5})\n\nWe can then initialize a 
`MultiVectorRetriever` as before, indexing the summaries in our vector store, and retaining the original documents in our document store:\n\n # The vectorstore to use to index the child chunksvectorstore = Chroma(collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryByteStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, byte_store=store, id_key=id_key,)doc_ids = [str(uuid.uuid4()) for _ in docs]summary_docs = [ Document(page_content=s, metadata={id_key: doc_ids[i]}) for i, s in enumerate(summaries)]retriever.vectorstore.add_documents(summary_docs)retriever.docstore.mset(list(zip(doc_ids, docs)))\n\n # # We can also add the original chunks to the vectorstore if we so want# for i, doc in enumerate(docs):# doc.metadata[id_key] = doc_ids[i]# retriever.vectorstore.add_documents(docs)\n\nQuerying the vector store will return summaries:\n\n sub_docs = retriever.vectorstore.similarity_search(\"justice breyer\")sub_docs[0]\n\n Document(page_content=\"President Biden recently nominated Judge Ketanji Brown Jackson to serve on the United States Supreme Court, emphasizing her qualifications and broad support. The President also outlined a plan to secure the border, fix the immigration system, protect women's rights, support LGBTQ+ Americans, and advance mental health services. He highlighted the importance of bipartisan unity in passing legislation, such as the Violence Against Women Act. The President also addressed supporting veterans, particularly those impacted by exposure to burn pits, and announced plans to expand benefits for veterans with respiratory cancers. Additionally, he proposed a plan to end cancer as we know it through the Cancer Moonshot initiative. President Biden expressed optimism about the future of America and emphasized the strength of the American people in overcoming challenges.\", metadata={'doc_id': '84015b1b-980e-400a-94d8-cf95d7e079bd'})\n\nWhereas the retriever will return the larger source document:\n\n retrieved_docs = retriever.invoke(\"justice breyer\")len(retrieved_docs[0].page_content)\n\n 9194\n\nHypothetical Queries[​](#hypothetical-queries \"Direct link to Hypothetical Queries\")\n------------------------------------------------------------------------------------\n\nAn LLM can also be used to generate a list of hypothetical questions that could be asked of a particular document, which might bear close semantic similarity to relevant queries in a [RAG](/v0.2/docs/tutorials/rag/) application. 
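A sketch of such a question-generation chain, using structured output to get back a plain list of strings (this mirrors the snippet shown below and assumes the `docs` loaded earlier):

```python
from typing import List

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI


class HypotheticalQuestions(BaseModel):
    """Generate hypothetical questions."""

    questions: List[str] = Field(..., description="List of questions")


question_chain = (
    {"doc": lambda x: x.page_content}
    | ChatPromptTemplate.from_template(
        "Generate a list of exactly 3 hypothetical questions that the below "
        "document could be used to answer:\n\n{doc}"
    )
    | ChatOpenAI(model="gpt-4o").with_structured_output(HypotheticalQuestions)
    | (lambda x: x.questions)  # unwrap the structured output into a plain list
)

question_chain.invoke(docs[0])
```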
These questions can then be embedded and associated with the documents to improve retrieval.\n\nBelow, we use the [with\\_structured\\_output](/v0.2/docs/how_to/structured_output/) method to structure the LLM output into a list of strings.\n\n from typing import Listfrom langchain_core.pydantic_v1 import BaseModel, Fieldclass HypotheticalQuestions(BaseModel): \"\"\"Generate hypothetical questions.\"\"\" questions: List[str] = Field(..., description=\"List of questions\")chain = ( {\"doc\": lambda x: x.page_content} # Only asking for 3 hypothetical questions, but this could be adjusted | ChatPromptTemplate.from_template( \"Generate a list of exactly 3 hypothetical questions that the below document could be used to answer:\\n\\n{doc}\" ) | ChatOpenAI(max_retries=0, model=\"gpt-4o\").with_structured_output( HypotheticalQuestions ) | (lambda x: x.questions))\n\nInvoking the chain on a single document demonstrates that it outputs a list of questions:\n\n chain.invoke(docs[0])\n\n [\"What impact did the IBM 1401 have on the author's early programming experiences?\", \"How did the transition from using the IBM 1401 to microcomputers influence the author's programming journey?\", \"What role did Lisp play in shaping the author's understanding and approach to AI?\"]\n\nWe can batch then batch the chain over all documents and assemble our vector store and document store as before:\n\n # Batch chain over documents to generate hypothetical questionshypothetical_questions = chain.batch(docs, {\"max_concurrency\": 5})# The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"hypo-questions\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryByteStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, byte_store=store, id_key=id_key,)doc_ids = [str(uuid.uuid4()) for _ in docs]# Generate Document objects from hypothetical questionsquestion_docs = []for i, question_list in enumerate(hypothetical_questions): question_docs.extend( [Document(page_content=s, metadata={id_key: doc_ids[i]}) for s in question_list] )retriever.vectorstore.add_documents(question_docs)retriever.docstore.mset(list(zip(doc_ids, docs)))\n\nNote that querying the underlying vector store will retrieve hypothetical questions that are semantically similar to the input query:\n\n sub_docs = retriever.vectorstore.similarity_search(\"justice breyer\")sub_docs\n\n [Document(page_content='What might be the potential benefits of nominating Circuit Court of Appeals Judge Ketanji Brown Jackson to the United States Supreme Court?', metadata={'doc_id': '43292b74-d1b8-4200-8a8b-ea0cb57fbcdb'}), Document(page_content='How might the Bipartisan Infrastructure Law impact the economic competition between the U.S. 
and China?', metadata={'doc_id': '66174780-d00c-4166-9791-f0069846e734'}), Document(page_content='What factors led to the creation of Y Combinator?', metadata={'doc_id': '72003c4e-4cc9-4f09-a787-0b541a65b38c'}), Document(page_content='How did the ability to publish essays online change the landscape for writers and thinkers?', metadata={'doc_id': 'e8d2c648-f245-4bcc-b8d3-14e64a164b64'})]\n\nAnd invoking the retriever will return the corresponding document:\n\n retrieved_docs = retriever.invoke(\"justice breyer\")len(retrieved_docs[0].page_content)\n\n 9194\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/multi_vector.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to migrate from legacy LangChain agents to LangGraph\n\n](/v0.2/docs/how_to/migrate_agent/)[\n\nNext\n\nHow to pass multimodal data directly to models\n\n](/v0.2/docs/how_to/multimodal_inputs/)\n\n* [Smaller chunks](#smaller-chunks)\n* [Associating summaries with a document for retrieval](#associating-summaries-with-a-document-for-retrieval)\n* [Hypothetical Queries](#hypothetical-queries)"},"last_modified":{"kind":"null"}}},{"rowIdx":1392,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/tutorials/summarization/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [Tutorials](/v0.2/docs/tutorials/)\n* Summarize Text\n\nOn this page\n\nSummarize Text\n==============\n\nSuppose you have a set of documents (PDFs, Notion pages, customer questions, etc.) and you want to summarize the content.\n\nLLMs are a great tool for this given their proficiency in understanding and synthesizing text.\n\nIn the context of [retrieval-augmented generation](/v0.2/docs/tutorials/rag/), summarizing text can help distill the information in a large number of retrieved documents to provide context for a LLM.\n\nIn this walkthrough we'll go over how to summarize content from multiple documents using LLMs.\n\n![Image description](/v0.2/assets/images/summarization_use_case_1-874f7b2c94f64216f1f967fb5aca7bc1.png)\n\nConcepts[​](#concepts \"Direct link to Concepts\")\n------------------------------------------------\n\nConcepts we will cover are:\n\n* Using [language models](/v0.2/docs/concepts/#chat-models).\n \n* Using [document loaders](/v0.2/docs/concepts/#document-loaders), specifically the [WebBaseLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.web_base.WebBaseLoader.html) to load content from an HTML webpage.\n \n* Three ways to summarize or otherwise combine documents.\n \n 1. [Stuff](/v0.2/docs/tutorials/summarization/#stuff), which simply concatenates documents into a prompt;\n 2. [Map-reduce](/v0.2/docs/tutorials/summarization/#map-reduce), which splits documents into batches, summarizes those, and then summarizes the summaries;\n 3. [Refine](/v0.2/docs/tutorials/summarization/#refine), which updates a rolling summary be iterating over the documents in a sequence.\n\nThat's a fair amount to cover! 
Let's dive in.\n\nSetup[​](#setup \"Direct link to Setup\")\n---------------------------------------\n\n### Jupyter Notebook[​](#jupyter-notebook \"Direct link to Jupyter Notebook\")\n\nThis guide (and most of the other guides in the documentation) uses [Jupyter notebooks](https://jupyter.org/) and assumes the reader is as well. Jupyter notebooks are perfect for learning how to work with LLM systems because oftentimes things can go wrong (unexpected output, API down, etc) and going through guides in an interactive environment is a great way to better understand them.\n\nThis and other tutorials are perhaps most conveniently run in a Jupyter notebook. See [here](https://jupyter.org/install) for instructions on how to install.\n\n### Installation[​](#installation \"Direct link to Installation\")\n\nTo install LangChain run:\n\n* Pip\n* Conda\n\n pip install langchain\n\n conda install langchain -c conda-forge\n\nFor more details, see our [Installation guide](/v0.2/docs/how_to/installation/).\n\n### LangSmith[​](#langsmith \"Direct link to LangSmith\")\n\nMany of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with [LangSmith](https://smith.langchain.com).\n\nAfter you sign up at the link above, make sure to set your environment variables to start logging traces:\n\n export LANGCHAIN_TRACING_V2=\"true\"export LANGCHAIN_API_KEY=\"...\"\n\nOr, if in a notebook, you can set them with:\n\n import getpassimport osos.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()\n\nOverview[​](#overview \"Direct link to Overview\")\n------------------------------------------------\n\nA central question for building a summarizer is how to pass your documents into the LLM's context window. Three common approaches for this are:\n\n1. `Stuff`: Simply \"stuff\" all your documents into a single prompt. This is the simplest approach (see [here](/v0.2/docs/tutorials/rag/#built-in-chains) for more on the `create_stuff_documents_chain` constructor, which is used for this method).\n \n2. `Map-reduce`: Summarize each document on its own in a \"map\" step and then \"reduce\" the summaries into a final summary (see [here](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain.html) for more on the `MapReduceDocumentsChain`, which is used for this method).\n \n3. `Refine`: Update a rolling summary be iterating over the documents in a sequence.\n \n\n![Image description](/v0.2/assets/images/summarization_use_case_2-f2a4d5d60980a79140085fb7f8043217.png)\n\nQuickstart[​](#quickstart \"Direct link to Quickstart\")\n------------------------------------------------------\n\nTo give you a sneak preview, either pipeline can be wrapped in a single object: `load_summarize_chain`.\n\nSuppose we want to summarize a blog post. 
We can create this in a few lines of code.\n\nFirst set environment variables and install packages:\n\n %pip install --upgrade --quiet langchain-openai tiktoken chromadb langchain# Set env var OPENAI_API_KEY or load from a .env file# import dotenv# dotenv.load_dotenv()\n\nWe can use `chain_type=\"stuff\"`, especially if using larger context window models such as:\n\n* 128k token OpenAI `gpt-4-turbo-2024-04-09`\n* 200k token Anthropic `claude-3-sonnet-20240229`\n\nWe can also supply `chain_type=\"map_reduce\"` or `chain_type=\"refine\"`.\n\nFirst we load in our documents. We will use [WebBaseLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.web_base.WebBaseLoader.html) to load a blog post:\n\n import osos.environ[\"LANGCHAIN_TRACING_V2\"] = \"True\"\n\n from langchain.chains.summarize import load_summarize_chainfrom langchain_community.document_loaders import WebBaseLoaderfrom langchain_openai import ChatOpenAIloader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")docs = loader.load()llm = ChatOpenAI(temperature=0, model_name=\"gpt-3.5-turbo-1106\")chain = load_summarize_chain(llm, chain_type=\"stuff\")result = chain.invoke(docs)print(result[\"output_text\"])\n\n**API Reference:**[load\\_summarize\\_chain](https://api.python.langchain.com/en/latest/chains/langchain.chains.summarize.chain.load_summarize_chain.html) | [WebBaseLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.web_base.WebBaseLoader.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)\n\n The article discusses the concept of LLM-powered autonomous agents, with a focus on the components of planning, memory, and tool use. It includes case studies and proof-of-concept examples, as well as challenges and references to related research. The author emphasizes the potential of LLMs in creating powerful problem-solving agents, while also highlighting limitations such as finite context length and reliability of natural language interfaces.\n\nOption 1. Stuff[​](#stuff \"Direct link to Option 1. 
Stuff\")\n-----------------------------------------------------------\n\nWhen we use `load_summarize_chain` with `chain_type=\"stuff\"`, we will use the [StuffDocumentsChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.stuff.StuffDocumentsChain.html#langchain.chains.combine_documents.stuff.StuffDocumentsChain).\n\nThe chain will take a list of documents, insert them all into a prompt, and pass that prompt to an LLM:\n\n from langchain.chains.combine_documents.stuff import StuffDocumentsChainfrom langchain.chains.llm import LLMChainfrom langchain_core.prompts import PromptTemplate# Define promptprompt_template = \"\"\"Write a concise summary of the following:\"{text}\"CONCISE SUMMARY:\"\"\"prompt = PromptTemplate.from_template(prompt_template)# Define LLM chainllm = ChatOpenAI(temperature=0, model_name=\"gpt-3.5-turbo-16k\")llm_chain = LLMChain(llm=llm, prompt=prompt)# Define StuffDocumentsChainstuff_chain = StuffDocumentsChain(llm_chain=llm_chain, document_variable_name=\"text\")docs = loader.load()print(stuff_chain.invoke(docs)[\"output_text\"])\n\n**API Reference:**[StuffDocumentsChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.stuff.StuffDocumentsChain.html) | [LLMChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.llm.LLMChain.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html)\n\n The article discusses the concept of building autonomous agents powered by large language models (LLMs). It explores the components of such agents, including planning, memory, and tool use. The article provides case studies and examples of proof-of-concept demos, highlighting the challenges and limitations of LLM-powered agents. It also includes references to related research papers and projects.\n\nGreat! We can see that we reproduce the earlier result using the `load_summarize_chain`.\n\n### Go deeper[​](#go-deeper \"Direct link to Go deeper\")\n\n* You can easily customize the prompt.\n* You can easily try different LLMs, (e.g., [Claude](/v0.2/docs/integrations/chat/anthropic/)) via the `llm` parameter.\n\nOption 2. Map-Reduce[​](#map-reduce \"Direct link to Option 2. Map-Reduce\")\n--------------------------------------------------------------------------\n\nLet's unpack the map reduce approach. For this, we'll first map each document to an individual summary using an `LLMChain`. 
Then we'll use a `ReduceDocumentsChain` to combine those summaries into a single global summary.\n\nFirst, we specify the LLMChain to use for mapping each document to an individual summary:\n\n from langchain.chains import MapReduceDocumentsChain, ReduceDocumentsChainfrom langchain_text_splitters import CharacterTextSplitterllm = ChatOpenAI(temperature=0)# Mapmap_template = \"\"\"The following is a set of documents{docs}Based on this list of docs, please identify the main themes Helpful Answer:\"\"\"map_prompt = PromptTemplate.from_template(map_template)map_chain = LLMChain(llm=llm, prompt=map_prompt)\n\n**API Reference:**[MapReduceDocumentsChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain.html) | [ReduceDocumentsChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.reduce.ReduceDocumentsChain.html) | [CharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.CharacterTextSplitter.html)\n\nWe can also use the Prompt Hub to store and fetch prompts.\n\nThis will work with your [LangSmith API key](https://docs.smith.langchain.com/).\n\nFor example, see the map prompt [here](https://smith.langchain.com/hub/rlm/map-prompt).\n\n from langchain import hubmap_prompt = hub.pull(\"rlm/map-prompt\")map_chain = LLMChain(llm=llm, prompt=map_prompt)\n\nThe `ReduceDocumentsChain` handles taking the document mapping results and reducing them into a single output. It wraps a generic `CombineDocumentsChain` (like `StuffDocumentsChain`) but adds the ability to collapse documents before passing it to the `CombineDocumentsChain` if their cumulative size exceeds `token_max`. In this example, we can actually re-use our chain for combining our docs to also collapse our docs.\n\nSo if the cumulative number of tokens in our mapped documents exceeds 4000 tokens, then we'll recursively pass in the documents in batches of < 4000 tokens to our `StuffDocumentsChain` to create batched summaries. And once those batched summaries are cumulatively less than 4000 tokens, we'll pass them all one last time to the `StuffDocumentsChain` to create the final summary.\n\n # Reducereduce_template = \"\"\"The following is set of summaries:{docs}Take these and distill it into a final, consolidated summary of the main themes. Helpful Answer:\"\"\"reduce_prompt = PromptTemplate.from_template(reduce_template)\n\n # Note we can also get this from the prompt hub, as noted abovereduce_prompt = hub.pull(\"rlm/reduce-prompt\")\n\n reduce_prompt\n\n ChatPromptTemplate(input_variables=['docs'], metadata={'lc_hub_owner': 'rlm', 'lc_hub_repo': 'map-prompt', 'lc_hub_commit_hash': 'de4fba345f211a462584fc25b7077e69c1ba6cdcf4e21b7ec9abe457ddb16c87'}, messages=[HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['docs'], template='The following is a set of documents:\\n{docs}\\nBased on this list of docs, please identify the main themes \\nHelpful Answer:'))])\n\n # Run chainreduce_chain = LLMChain(llm=llm, prompt=reduce_prompt)# Takes a list of documents, combines them into a single string, and passes this to an LLMChaincombine_documents_chain = StuffDocumentsChain( llm_chain=reduce_chain, document_variable_name=\"docs\")# Combines and iteratively reduces the mapped documentsreduce_documents_chain = ReduceDocumentsChain( # This is final chain that is called. 
combine_documents_chain=combine_documents_chain, # If documents exceed context for `StuffDocumentsChain` collapse_documents_chain=combine_documents_chain, # The maximum number of tokens to group documents into. token_max=4000,)\n\nCombining our map and reduce chains into one:\n\n # Combining documents by mapping a chain over them, then combining resultsmap_reduce_chain = MapReduceDocumentsChain( # Map chain llm_chain=map_chain, # Reduce chain reduce_documents_chain=reduce_documents_chain, # The variable name in the llm_chain to put the documents in document_variable_name=\"docs\", # Return the results of the map steps in the output return_intermediate_steps=False,)text_splitter = CharacterTextSplitter.from_tiktoken_encoder( chunk_size=1000, chunk_overlap=0)split_docs = text_splitter.split_documents(docs)\n\n Created a chunk of size 1003, which is longer than the specified 1000\n\n result = map_reduce_chain.invoke(split_docs)print(result[\"output_text\"])\n\n The main themes identified in the list of documents provided are related to large language models (LLMs), autonomous agents, prompting, steering language models, natural language processing (NLP), the use of tools to augment language models, reinforcement learning, reasoning, acting, self-reflection, and the integration of language models with external knowledge sources.\n\nIf we follow the [Langsmith Trace](https://smith.langchain.com/public/3a1a6d51-68e5-4805-8d90-78920ce60a51/r), we can see the the individual LLM summarizations, including the [final call](https://smith.langchain.com/public/69482813-f0b7-46b0-a99f-86d56fc9644a/r) that summarizes the summaries.\n\n### Go deeper[​](#go-deeper-1 \"Direct link to Go deeper\")\n\n**Customization**\n\n* As shown above, you can customize the LLMs and prompts for map and reduce stages.\n\n**Real-world use-case**\n\n* See [this blog post](https://blog.langchain.dev/llms-to-improve-documentation/) case-study on analyzing user interactions (questions about LangChain documentation)!\n* The blog post and associated [repo](https://github.com/mendableai/QA_clustering) also introduce clustering as a means of summarization.\n* This opens up another path beyond the `stuff` or `map-reduce` approaches that is worth considering.\n\n![Image description](/v0.2/assets/images/summarization_use_case_3-896f435bc48194ddaead73043027e16f.png)\n\nOption 3. Refine[​](#refine \"Direct link to Option 3. Refine\")\n--------------------------------------------------------------\n\n[RefineDocumentsChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.refine.RefineDocumentsChain.html) is similar to map-reduce:\n\n> The refine documents chain constructs a response by looping over the input documents and iteratively updating its answer. For each document, it passes all non-document inputs, the current document, and the latest intermediate answer to an LLM chain to get a new answer.\n\nThis can be easily run with the `chain_type=\"refine\"` specified.\n\n chain = load_summarize_chain(llm, chain_type=\"refine\")result = chain.invoke(split_docs)print(result[\"output_text\"])\n\n The existing summary provides detailed instructions for implementing a project's architecture through code, focusing on creating core classes, functions, and methods in different files following best practices for the chosen language and framework. Assumptions about the model, view, and controller components are also outlined. 
The additional context highlights challenges in long-term planning and task decomposition, as well as the reliability issues with natural language interfaces in LLM-powered autonomous agents. These insights shed light on the limitations and potential pitfalls of using LLMs in agent systems, with references to recent research on LLM-powered autonomous agents and related technologies.\n\nFollowing the [Langsmith trace](https://smith.langchain.com/public/38017fa7-b190-4635-992c-e8554227a4bb/r), we can see the summaries iteratively updated with new information.\n\nIt's also possible to supply a prompt and return intermediate steps.\n\n prompt_template = \"\"\"Write a concise summary of the following:{text}CONCISE SUMMARY:\"\"\"prompt = PromptTemplate.from_template(prompt_template)refine_template = ( \"Your job is to produce a final summary\\n\" \"We have provided an existing summary up to a certain point: {existing_answer}\\n\" \"We have the opportunity to refine the existing summary\" \"(only if needed) with some more context below.\\n\" \"------------\\n\" \"{text}\\n\" \"------------\\n\" \"Given the new context, refine the original summary in Italian\" \"If the context isn't useful, return the original summary.\")refine_prompt = PromptTemplate.from_template(refine_template)chain = load_summarize_chain( llm=llm, chain_type=\"refine\", question_prompt=prompt, refine_prompt=refine_prompt, return_intermediate_steps=True, input_key=\"input_documents\", output_key=\"output_text\",)result = chain.invoke({\"input_documents\": split_docs}, return_only_outputs=True)\n\n print(result[\"output_text\"])\n\n Il presente articolo discute il concetto di costruire agenti autonomi utilizzando LLM (large language model) come controller principale. Esplora i diversi componenti di un sistema di agenti alimentato da LLM, tra cui la pianificazione, la memoria e l'uso degli strumenti. Dimostrazioni di concetto come AutoGPT mostrano il potenziale di LLM come risolutore generale di problemi. Approcci come Chain of Thought, Tree of Thoughts, LLM+P, ReAct e Reflexion consentono agli agenti autonomi di pianificare, riflettere su se stessi e migliorarsi iterativamente. Tuttavia, ci sono sfide da affrontare, come la limitata capacità di contesto che limita l'inclusione di informazioni storiche dettagliate e la difficoltà di pianificazione a lungo termine e decomposizione delle attività. Inoltre, l'affidabilità dell'interfaccia di linguaggio naturale tra LLM e componenti esterni come la memoria e gli strumenti è incerta, poiché i LLM possono commettere errori di formattazione e mostrare comportamenti ribelli. Nonostante ciò, il sistema AutoGPT viene menzionato come esempio di dimostrazione di concetto che utilizza LLM come controller principale per agenti autonomi. Questo articolo fa riferimento a diverse fonti che esplorano approcci e applicazioni specifiche di LLM nell'ambito degli agenti autonomi.\n\n print(\"\\n\\n\".join(result[\"intermediate_steps\"][:3]))\n\n This article discusses the concept of building autonomous agents using LLM (large language model) as the core controller. The article explores the different components of an LLM-powered agent system, including planning, memory, and tool use. It also provides examples of proof-of-concept demos and highlights the potential of LLM as a general problem solver.Questo articolo discute del concetto di costruire agenti autonomi utilizzando LLM (large language model) come controller principale. 
L'articolo esplora i diversi componenti di un sistema di agenti alimentato da LLM, inclusa la pianificazione, la memoria e l'uso degli strumenti. Vengono forniti anche esempi di dimostrazioni di proof-of-concept e si evidenzia il potenziale di LLM come risolutore generale di problemi. Inoltre, vengono presentati approcci come Chain of Thought, Tree of Thoughts, LLM+P, ReAct e Reflexion che consentono agli agenti autonomi di pianificare, riflettere su se stessi e migliorare iterativamente.Questo articolo discute del concetto di costruire agenti autonomi utilizzando LLM (large language model) come controller principale. L'articolo esplora i diversi componenti di un sistema di agenti alimentato da LLM, inclusa la pianificazione, la memoria e l'uso degli strumenti. Vengono forniti anche esempi di dimostrazioni di proof-of-concept e si evidenzia il potenziale di LLM come risolutore generale di problemi. Inoltre, vengono presentati approcci come Chain of Thought, Tree of Thoughts, LLM+P, ReAct e Reflexion che consentono agli agenti autonomi di pianificare, riflettere su se stessi e migliorare iterativamente. Il nuovo contesto riguarda l'approccio Chain of Hindsight (CoH) che permette al modello di migliorare autonomamente i propri output attraverso un processo di apprendimento supervisionato. Viene anche presentato l'approccio Algorithm Distillation (AD) che applica lo stesso concetto alle traiettorie di apprendimento per compiti di reinforcement learning.\n\nSplitting and summarizing in a single chain[​](#splitting-and-summarizing-in-a-single-chain \"Direct link to Splitting and summarizing in a single chain\")\n---------------------------------------------------------------------------------------------------------------------------------------------------------\n\nFor convenience, we can wrap both the text splitting of our long document and summarizing in a single `AnalyzeDocumentsChain`.\n\n from langchain.chains import AnalyzeDocumentChainsummarize_document_chain = AnalyzeDocumentChain( combine_docs_chain=chain, text_splitter=text_splitter)summarize_document_chain.invoke(docs[0].page_content)\n\n**API Reference:**[AnalyzeDocumentChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.base.AnalyzeDocumentChain.html)\n\nNext steps[​](#next-steps \"Direct link to Next steps\")\n------------------------------------------------------\n\nWe encourage you to check out the [how-to guides](/v0.2/docs/how_to/) for more detail on:\n\n* Built-in [document loaders](/v0.2/docs/how_to/#document-loaders) and [text-splitters](/v0.2/docs/how_to/#text-splitters)\n* Integrating various combine-document chains into a [RAG application](/v0.2/docs/tutorials/rag/)\n* Incorporating retrieval into a [chatbot](/v0.2/docs/how_to/chatbots_retrieval/)\n\nand other concepts.\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/tutorials/summarization.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nBuild a Question/Answering system over SQL data\n\n](/v0.2/docs/tutorials/sql_qa/)[\n\nNext\n\nHow-to guides\n\n](/v0.2/docs/how_to/)\n\n* [Concepts](#concepts)\n* [Setup](#setup)\n * [Jupyter Notebook](#jupyter-notebook)\n * [Installation](#installation)\n * 
* * *

https://python.langchain.com/v0.2/docs/tutorials/

Tutorials
=========

New to LangChain or to LLM app development in general? Read this material to quickly get up and running.

Basics
------

* [Build a Simple LLM Application with LCEL](/v0.2/docs/tutorials/llm_chain/)
* [Build a Chatbot](/v0.2/docs/tutorials/chatbot/)
* [Build vector stores and retrievers](/v0.2/docs/tutorials/retrievers/)
* [Build an Agent](/v0.2/docs/tutorials/agents/)

Working with external knowledge
-------------------------------

* [Build a Retrieval Augmented Generation (RAG) Application](/v0.2/docs/tutorials/rag/)
* [Build a Conversational RAG Application](/v0.2/docs/tutorials/qa_chat_history/)
* [Build a Question/Answering system over SQL data](/v0.2/docs/tutorials/sql_qa/)
* [Build a Query Analysis System](/v0.2/docs/tutorials/query_analysis/)
* [Build a local RAG application](/v0.2/docs/tutorials/local_rag/)
* [Build a Question Answering application over a Graph Database](/v0.2/docs/tutorials/graph/)
* [Build a PDF ingestion and Question/Answering system](/v0.2/docs/tutorials/pdf_qa/)

Specialized tasks
-----------------

* [Build an Extraction Chain](/v0.2/docs/tutorials/extraction/)
* [Generate synthetic data](/v0.2/docs/tutorials/data_generation/)
* [Classify text into labels](/v0.2/docs/tutorials/classification/)
* [Summarize text](/v0.2/docs/tutorials/summarization/)

LangGraph
---------

LangGraph is an extension of LangChain aimed at building robust, stateful multi-actor applications with LLMs by modeling steps as nodes and edges of a graph.

LangGraph documentation is currently hosted on a separate site. You can peruse [LangGraph tutorials here](https://langchain-ai.github.io/langgraph/tutorials/).

LangSmith
---------

LangSmith allows you to closely trace, monitor and evaluate your LLM application. It integrates seamlessly with LangChain, and you can use it to inspect and debug individual steps of your chains as you build.

LangSmith documentation is hosted on a separate site. You can peruse [LangSmith tutorials here](https://docs.smith.langchain.com/tutorials/).

### Evaluation

LangSmith helps you evaluate the performance of your LLM applications. The tutorial below is a great way to get started:

* [Evaluate your LLM application](https://docs.smith.langchain.com/tutorials/Developers/evaluation)

More
----

For more tutorials, see our [cookbook section](https://github.com/langchain-ai/langchain/tree/master/cookbook).

* * *

https://python.langchain.com/v0.2/docs/how_to/migrate_agent/

How to migrate from legacy LangChain agents to LangGraph
=========================================================

Here we focus on how to move from legacy LangChain agents to LangGraph agents. LangChain agents (the [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor) in particular) have multiple configuration parameters. In this notebook we show how those parameters map to the LangGraph [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent).

#### Prerequisites

This how-to guide uses OpenAI as the LLM. Install the dependencies before running the examples.

    %%capture --no-stderr
    %pip install -U langgraph langchain langchain-openai

Basic Usage
-----------

For basic creation and usage of a tool-calling ReAct-style agent, the functionality is the same. First, let's define a model and tool(s), then we'll use those to create an agent.

    from langchain_core.tools import tool
    from langchain_openai import ChatOpenAI

    model = ChatOpenAI(model="gpt-4o")


    @tool
    def magic_function(input: int) -> int:
        """Applies a magic function to an input."""
        return input + 2


    tools = [magic_function]

    query = "what is the value of magic_function(3)?"

**API Reference:** [tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)

For the LangChain [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor), we define a prompt with a placeholder for the agent's scratchpad.
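One practical note before invoking anything: the code in this guide assumes an OpenAI API key is already available in your environment. A minimal setup sketch (our addition, not part of the original notebook), mirroring the pattern used elsewhere in these docs:

    import getpass
    import os

    # Assumed setup: reuse an exported OPENAI_API_KEY if present, otherwise prompt for one.
    if not os.environ.get("OPENAI_API_KEY"):
        os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API key: ")

    # Optional: enable LangSmith tracing to inspect the runs below.
    # os.environ["LANGCHAIN_TRACING_V2"] = "true"
    # os.environ["LANGCHAIN_API_KEY"] = getpass.getpass("LangSmith API key: ")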
The agent can be invoked as follows:\n\n from langchain.agents import AgentExecutor, create_tool_calling_agentfrom langchain_core.prompts import ChatPromptTemplateprompt = ChatPromptTemplate.from_messages( [ (\"system\", \"You are a helpful assistant\"), (\"human\", \"{input}\"), # Placeholders fill up a **list** of messages (\"placeholder\", \"{agent_scratchpad}\"), ])agent = create_tool_calling_agent(model, tools, prompt)agent_executor = AgentExecutor(agent=agent, tools=tools)agent_executor.invoke({\"input\": query})\n\n**API Reference:**[AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html) | [create\\_tool\\_calling\\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.tool_calling_agent.base.create_tool_calling_agent.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html)\n\n {'input': 'what is the value of magic_function(3)?', 'output': 'The value of `magic_function(3)` is 5.'}\n\nLangGraph's [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) manages a state that is defined by a list of messages. It will continue to process the list until there are no tool calls in the agent's output. To kick it off, we input a list of messages. The output will contain the entire state of the graph-- in this case, the conversation history.\n\n from langgraph.prebuilt import create_react_agentapp = create_react_agent(model, tools)messages = app.invoke({\"messages\": [(\"human\", query)]}){ \"input\": query, \"output\": messages[\"messages\"][-1].content,}\n\n {'input': 'what is the value of magic_function(3)?', 'output': 'The value of `magic_function(3)` is 5.'}\n\n message_history = messages[\"messages\"]new_query = \"Pardon?\"messages = app.invoke({\"messages\": message_history + [(\"human\", new_query)]}){ \"input\": new_query, \"output\": messages[\"messages\"][-1].content,}\n\n {'input': 'Pardon?', 'output': 'The result of applying `magic_function` to the input 3 is 5.'}\n\nPrompt Templates[​](#prompt-templates \"Direct link to Prompt Templates\")\n------------------------------------------------------------------------\n\nWith legacy LangChain agents you have to pass in a prompt template. You can use this to control the agent.\n\nWith LangGraph [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent), by default there is no prompt. You can achieve similar control over the agent in a few ways:\n\n1. Pass in a system message as input\n2. Initialize the agent with a system message\n3. Initialize the agent with a function to transform messages before passing to the model.\n\nLet's take a look at all of these below. We will pass in custom instructions to get the agent to respond in Spanish.\n\nFirst up, using AgentExecutor:\n\n prompt = ChatPromptTemplate.from_messages( [ (\"system\", \"You are a helpful assistant. 
Respond only in Spanish.\"), (\"human\", \"{input}\"), # Placeholders fill up a **list** of messages (\"placeholder\", \"{agent_scratchpad}\"), ])agent = create_tool_calling_agent(model, tools, prompt)agent_executor = AgentExecutor(agent=agent, tools=tools)agent_executor.invoke({\"input\": query})\n\n {'input': 'what is the value of magic_function(3)?', 'output': 'El valor de `magic_function(3)` es 5.'}\n\nNow, let's pass a custom system message to [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent). This can either be a string or a LangChain SystemMessage.\n\n from langchain_core.messages import SystemMessagefrom langgraph.prebuilt import create_react_agentsystem_message = \"You are a helpful assistant. Respond only in Spanish.\"# This could also be a SystemMessage object# system_message = SystemMessage(content=\"You are a helpful assistant. Respond only in Spanish.\")app = create_react_agent(model, tools, messages_modifier=system_message)messages = app.invoke({\"messages\": [(\"user\", query)]})\n\n**API Reference:**[SystemMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html)\n\nWe can also pass in an arbitrary function. This function should take in a list of messages and output a list of messages. We can do all types of arbitrary formatting of messages here. In this cases, let's just add a SystemMessage to the start of the list of messages.\n\n from langchain_core.messages import AnyMessagefrom langgraph.prebuilt import create_react_agentprompt = ChatPromptTemplate.from_messages( [ (\"system\", \"You are a helpful assistant. Respond only in Spanish.\"), (\"placeholder\", \"{messages}\"), ])def _modify_messages(messages: list[AnyMessage]): return prompt.invoke({\"messages\": messages}).to_messages() + [ (\"user\", \"Also say 'Pandamonium!' after the answer.\") ]app = create_react_agent(model, tools, messages_modifier=_modify_messages)messages = app.invoke({\"messages\": [(\"human\", query)]})print( { \"input\": query, \"output\": messages[\"messages\"][-1].content, })\n\n {'input': 'what is the value of magic_function(3)?', 'output': 'El valor de magic_function(3) es 5. 
¡Pandamonium!'}\n\nMemory[​](#memory \"Direct link to Memory\")\n------------------------------------------\n\nWith LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could add chat [Memory](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.memory) so it can engage in a multi-turn conversation.\n\n from langchain.agents import AgentExecutor, create_tool_calling_agentfrom langchain_community.chat_message_histories import ChatMessageHistoryfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.runnables.history import RunnableWithMessageHistoryfrom langchain_core.tools import toolfrom langchain_openai import ChatOpenAImodel = ChatOpenAI(model=\"gpt-4o\")memory = ChatMessageHistory(session_id=\"test-session\")prompt = ChatPromptTemplate.from_messages( [ (\"system\", \"You are a helpful assistant.\"), # First put the history (\"placeholder\", \"{chat_history}\"), # Then the new input (\"human\", \"{input}\"), # Finally the scratchpad (\"placeholder\", \"{agent_scratchpad}\"), ])@tooldef magic_function(input: int) -> int: \"\"\"Applies a magic function to an input.\"\"\" return input + 2tools = [magic_function]agent = create_tool_calling_agent(model, tools, prompt)agent_executor = AgentExecutor(agent=agent, tools=tools)agent_with_chat_history = RunnableWithMessageHistory( agent_executor, # This is needed because in most real world scenarios, a session id is needed # It isn't really used here because we are using a simple in memory ChatMessageHistory lambda session_id: memory, input_messages_key=\"input\", history_messages_key=\"chat_history\",)config = {\"configurable\": {\"session_id\": \"test-session\"}}print( agent_with_chat_history.invoke( {\"input\": \"Hi, I'm polly! What's the output of magic_function of 3?\"}, config )[\"output\"])print(\"---\")print(agent_with_chat_history.invoke({\"input\": \"Remember my name?\"}, config)[\"output\"])print(\"---\")print( agent_with_chat_history.invoke({\"input\": \"what was that output again?\"}, config)[ \"output\" ])\n\n**API Reference:**[AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html) | [create\\_tool\\_calling\\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.tool_calling_agent.base.create_tool_calling_agent.html) | [ChatMessageHistory](https://api.python.langchain.com/en/latest/chat_history/langchain_core.chat_history.ChatMessageHistory.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [RunnableWithMessageHistory](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) | [tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)\n\n Hi Polly! The output of the magic function for the input 3 is 5.---Yes, I remember your name, Polly! 
How can I assist you further?---The output of the magic function for the input 3 is 5.\n\n#### In LangGraph[​](#in-langgraph \"Direct link to In LangGraph\")\n\nMemory is just [persistence](https://langchain-ai.github.io/langgraph/how-tos/persistence/), aka [checkpointing](https://langchain-ai.github.io/langgraph/reference/checkpoints/).\n\nAdd a `checkpointer` to the agent and you get chat memory for free.\n\n from langchain_core.messages import SystemMessagefrom langgraph.checkpoint import MemorySaver # an in-memory checkpointerfrom langgraph.prebuilt import create_react_agentsystem_message = \"You are a helpful assistant.\"# This could also be a SystemMessage object# system_message = SystemMessage(content=\"You are a helpful assistant. Respond only in Spanish.\")memory = MemorySaver()app = create_react_agent( model, tools, messages_modifier=system_message, checkpointer=memory)config = {\"configurable\": {\"thread_id\": \"test-thread\"}}print( app.invoke( { \"messages\": [ (\"user\", \"Hi, I'm polly! What's the output of magic_function of 3?\") ] }, config, )[\"messages\"][-1].content)print(\"---\")print( app.invoke({\"messages\": [(\"user\", \"Remember my name?\")]}, config)[\"messages\"][ -1 ].content)print(\"---\")print( app.invoke({\"messages\": [(\"user\", \"what was that output again?\")]}, config)[ \"messages\" ][-1].content)\n\n**API Reference:**[SystemMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html)\n\n Hi Polly! The output of the magic_function for the input 3 is 5.---Yes, your name is Polly!---The output of the magic_function for the input 3 was 5.\n\nIterating through steps[​](#iterating-through-steps \"Direct link to Iterating through steps\")\n---------------------------------------------------------------------------------------------\n\nWith LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could iterate over the steps using the [stream](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) (or async `astream`) methods or the [iter](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter) method. 
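Only `stream` is demonstrated in the examples that follow; for completeness, here is a rough sketch of the `iter` pattern, given an `agent_executor` like the ones defined above. Treat it as an approximation: the exact keys in each yielded step dict can vary across LangChain versions, so verify against the API reference for your install.

    # Hypothetical usage of AgentExecutor.iter: each yielded step may expose the
    # most recent (action, observation) pair under "intermediate_step".
    for step in agent_executor.iter({"input": query}):
        if output := step.get("intermediate_step"):
            action, observation = output[0]
            print(f"tool={action.tool} observation={observation}")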
LangGraph supports stepwise iteration using [stream](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream)\n\n from langchain.agents import AgentExecutor, create_tool_calling_agentfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.tools import toolfrom langchain_openai import ChatOpenAImodel = ChatOpenAI(model=\"gpt-4o\")prompt = ChatPromptTemplate.from_messages( [ (\"system\", \"You are a helpful assistant.\"), (\"human\", \"{input}\"), # Placeholders fill up a **list** of messages (\"placeholder\", \"{agent_scratchpad}\"), ])@tooldef magic_function(input: int) -> int: \"\"\"Applies a magic function to an input.\"\"\" return input + 2tools = [magic_function]agent = create_tool_calling_agent(model, tools, prompt=prompt)agent_executor = AgentExecutor(agent=agent, tools=tools)for step in agent_executor.stream({\"input\": query}): print(step)\n\n**API Reference:**[AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html) | [create\\_tool\\_calling\\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.tool_calling_agent.base.create_tool_calling_agent.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)\n\n {'actions': [ToolAgentAction(tool='magic_function', tool_input={'input': 3}, log=\"\\nInvoking: `magic_function` with `{'input': 3}`\\n\\n\\n\", message_log=[AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt', 'function': {'arguments': '{\"input\":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'finish_reason': 'tool_calls'}, id='run-c68fd76f-a3c3-4c3c-bfd7-748c171ed4b8', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt'}], tool_call_chunks=[{'name': 'magic_function', 'args': '{\"input\":3}', 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt', 'index': 0}])], tool_call_id='call_q9MgGFjqJbV2xSUX93WqxmOt')], 'messages': [AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt', 'function': {'arguments': '{\"input\":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'finish_reason': 'tool_calls'}, id='run-c68fd76f-a3c3-4c3c-bfd7-748c171ed4b8', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt'}], tool_call_chunks=[{'name': 'magic_function', 'args': '{\"input\":3}', 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt', 'index': 0}])]}{'steps': [AgentStep(action=ToolAgentAction(tool='magic_function', tool_input={'input': 3}, log=\"\\nInvoking: `magic_function` with `{'input': 3}`\\n\\n\\n\", message_log=[AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt', 'function': {'arguments': '{\"input\":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'finish_reason': 'tool_calls'}, id='run-c68fd76f-a3c3-4c3c-bfd7-748c171ed4b8', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt'}], tool_call_chunks=[{'name': 'magic_function', 'args': '{\"input\":3}', 'id': 
'call_q9MgGFjqJbV2xSUX93WqxmOt', 'index': 0}])], tool_call_id='call_q9MgGFjqJbV2xSUX93WqxmOt'), observation=5)], 'messages': [FunctionMessage(content='5', name='magic_function')]}{'output': 'The value of `magic_function(3)` is 5.', 'messages': [AIMessage(content='The value of `magic_function(3)` is 5.')]}\n\n#### In LangGraph[​](#in-langgraph-1 \"Direct link to In LangGraph\")\n\nIn LangGraph, things are handled natively using [stream](https://langchain-ai.github.io/langgraph/reference/graphs/#langgraph.graph.graph.CompiledGraph.stream) or the asynchronous `astream` method.\n\n from langchain_core.messages import AnyMessagefrom langgraph.prebuilt import create_react_agentprompt = ChatPromptTemplate.from_messages( [ (\"system\", \"You are a helpful assistant.\"), (\"placeholder\", \"{messages}\"), ])def _modify_messages(messages: list[AnyMessage]): return prompt.invoke({\"messages\": messages}).to_messages()app = create_react_agent(model, tools, messages_modifier=_modify_messages)for step in app.stream({\"messages\": [(\"human\", query)]}, stream_mode=\"updates\"): print(step)\n\n {'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_yTjXXibj76tyFyPRa1soLo0S', 'function': {'arguments': '{\"input\":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 70, 'total_tokens': 84}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-b275f314-c42e-4e77-9dec-5c23f7dbd53b-0', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_yTjXXibj76tyFyPRa1soLo0S'}])]}}{'tools': {'messages': [ToolMessage(content='5', name='magic_function', id='41c5f227-528d-4483-a313-b03b23b1d327', tool_call_id='call_yTjXXibj76tyFyPRa1soLo0S')]}}{'agent': {'messages': [AIMessage(content='The value of `magic_function(3)` is 5.', response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 93, 'total_tokens': 107}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'stop', 'logprobs': None}, id='run-0ef12b6e-415d-4758-9b62-5e5e1b350072-0')]}}\n\n`return_intermediate_steps`[​](#return_intermediate_steps \"Direct link to return_intermediate_steps\")\n-----------------------------------------------------------------------------------------------------\n\nSetting this parameter on AgentExecutor allows users to access intermediate\\_steps, which pairs agent actions (e.g., tool invocations) with their outcomes.\n\n agent_executor = AgentExecutor(agent=agent, tools=tools, return_intermediate_steps=True)result = agent_executor.invoke({\"input\": query})print(result[\"intermediate_steps\"])\n\n [(ToolAgentAction(tool='magic_function', tool_input={'input': 3}, log=\"\\nInvoking: `magic_function` with `{'input': 3}`\\n\\n\\n\", message_log=[AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_ABI4hftfEdnVgKyfF6OzZbca', 'function': {'arguments': '{\"input\":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'finish_reason': 'tool_calls'}, id='run-837e794f-cfd8-40e0-8abc-4d98ced11b75', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_ABI4hftfEdnVgKyfF6OzZbca'}], tool_call_chunks=[{'name': 'magic_function', 'args': '{\"input\":3}', 'id': 'call_ABI4hftfEdnVgKyfF6OzZbca', 'index': 0}])], tool_call_id='call_ABI4hftfEdnVgKyfF6OzZbca'), 5)]\n\nBy default the [react agent 
executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) in LangGraph appends all messages to the central state. Therefore, it is easy to see any intermediate steps by just looking at the full state.\n\n from langgraph.prebuilt import create_react_agentapp = create_react_agent(model, tools=tools)messages = app.invoke({\"messages\": [(\"human\", query)]})messages\n\n {'messages': [HumanMessage(content='what is the value of magic_function(3)?', id='0f63e437-c4d8-4da9-b6f5-b293ebfe4a64'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_S96v28LlI6hNkQrNnIio0JPh', 'function': {'arguments': '{\"input\":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 64, 'total_tokens': 78}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-ffef7898-14b1-4537-ad90-7c000a8a5d25-0', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_S96v28LlI6hNkQrNnIio0JPh'}]), ToolMessage(content='5', name='magic_function', id='fbd9df4e-1dda-4d3e-9044-b001f7875476', tool_call_id='call_S96v28LlI6hNkQrNnIio0JPh'), AIMessage(content='The value of `magic_function(3)` is 5.', response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 87, 'total_tokens': 101}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'stop', 'logprobs': None}, id='run-e5d94c54-d9f4-45cd-be8e-a9101a8d88d6-0')]}\n\n`max_iterations`[​](#max_iterations \"Direct link to max_iterations\")\n--------------------------------------------------------------------\n\n`AgentExecutor` implements a `max_iterations` parameter, whereas this is controlled via `recursion_limit` in LangGraph.\n\nNote that in AgentExecutor, an \"iteration\" includes a full turn of tool invocation and execution. In LangGraph, each step contributes to the recursion limit, so we will need to multiply by two (and add one) to get equivalent results.\n\nIf the recursion limit is reached, LangGraph raises a specific exception type, that we can catch and manage similarly to AgentExecutor.\n\n @tooldef magic_function(input: str) -> str: \"\"\"Applies a magic function to an input.\"\"\" return \"Sorry, there was an error. Please try again.\"tools = [magic_function]\n\n prompt = ChatPromptTemplate.from_messages( [ (\"system\", \"You are a helpful assistant. Respond only in Spanish.\"), (\"human\", \"{input}\"), # Placeholders fill up a **list** of messages (\"placeholder\", \"{agent_scratchpad}\"), ])agent = create_tool_calling_agent(model, tools, prompt)agent_executor = AgentExecutor( agent=agent, tools=tools, verbose=True, max_iterations=3,)agent_executor.invoke({\"input\": query})\n\n \u001b[1m> Entering new AgentExecutor chain...\u001b[0m\u001b[32;1m\u001b[1;3mInvoking: `magic_function` with `{'input': '3'}`\u001b[0m\u001b[36;1m\u001b[1;3mSorry, there was an error. Please try again.\u001b[0m\u001b[32;1m\u001b[1;3mInvoking: `magic_function` with `{'input': '3'}`responded: Parece que hubo un error al intentar obtener el valor de `magic_function(3)`. Permíteme intentarlo de nuevo.\u001b[0m\u001b[36;1m\u001b[1;3mSorry, there was an error. Please try again.\u001b[0m\u001b[32;1m\u001b[1;3mAún no puedo obtener el valor de `magic_function(3)`. 
¿Hay algo más en lo que pueda ayudarte?\u001b[0m\u001b[1m> Finished chain.\u001b[0m\n\n {'input': 'what is the value of magic_function(3)?', 'output': 'Aún no puedo obtener el valor de `magic_function(3)`. ¿Hay algo más en lo que pueda ayudarte?'}\n\n from langgraph.errors import GraphRecursionErrorfrom langgraph.prebuilt import create_react_agentRECURSION_LIMIT = 2 * 3 + 1app = create_react_agent(model, tools=tools)try: for chunk in app.stream( {\"messages\": [(\"human\", query)]}, {\"recursion_limit\": RECURSION_LIMIT}, stream_mode=\"values\", ): print(chunk[\"messages\"][-1])except GraphRecursionError: print({\"input\": query, \"output\": \"Agent stopped due to max iterations.\"})\n\n ('human', 'what is the value of magic_function(3)?')content='' additional_kwargs={'tool_calls': [{'id': 'call_pFdKcCu5taDTtOOfX14vEDRp', 'function': {'arguments': '{\"input\":\"3\"}', 'name': 'magic_function'}, 'type': 'function'}]} response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 64, 'total_tokens': 78}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None} id='run-25836468-ba7e-43be-a7cf-76bba06a2a08-0' tool_calls=[{'name': 'magic_function', 'args': {'input': '3'}, 'id': 'call_pFdKcCu5taDTtOOfX14vEDRp'}]content='Sorry, there was an error. Please try again.' name='magic_function' id='1a08b883-9c7b-4969-9e9b-67ce64cdcb5f' tool_call_id='call_pFdKcCu5taDTtOOfX14vEDRp'content='It seems there was an error when trying to apply the magic function. Let me try again.' additional_kwargs={'tool_calls': [{'id': 'call_DA0lpDIkBFg2GHy4WsEcZG4K', 'function': {'arguments': '{\"input\":\"3\"}', 'name': 'magic_function'}, 'type': 'function'}]} response_metadata={'token_usage': {'completion_tokens': 34, 'prompt_tokens': 97, 'total_tokens': 131}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None} id='run-d571b774-0ea3-4e35-8b7d-f32932c3f3cc-0' tool_calls=[{'name': 'magic_function', 'args': {'input': '3'}, 'id': 'call_DA0lpDIkBFg2GHy4WsEcZG4K'}]content='Sorry, there was an error. Please try again.' name='magic_function' id='0b45787b-c82a-487f-9a5a-de129c30460f' tool_call_id='call_DA0lpDIkBFg2GHy4WsEcZG4K'content='It appears that there is a consistent issue when trying to apply the magic function to the input \"3.\" This could be due to various reasons, such as the input not being in the correct format or an internal error.\\n\\nIf you have any other questions or if there\\'s something else you\\'d like to try, please let me know!' response_metadata={'token_usage': {'completion_tokens': 66, 'prompt_tokens': 153, 'total_tokens': 219}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'stop', 'logprobs': None} id='run-50a962e6-21b7-4327-8dea-8e2304062627-0'\n\n`max_execution_time`[​](#max_execution_time \"Direct link to max_execution_time\")\n--------------------------------------------------------------------------------\n\n`AgentExecutor` implements a `max_execution_time` parameter, allowing users to abort a run that exceeds a total time limit.\n\n import time@tooldef magic_function(input: str) -> str: \"\"\"Applies a magic function to an input.\"\"\" time.sleep(2.5) return \"Sorry, there was an error. 
Please try again.\"tools = [magic_function]agent = create_tool_calling_agent(model, tools, prompt)agent_executor = AgentExecutor( agent=agent, tools=tools, max_execution_time=2, verbose=True,)agent_executor.invoke({\"input\": query})\n\n \u001b[1m> Entering new AgentExecutor chain...\u001b[0m\u001b[32;1m\u001b[1;3mInvoking: `magic_function` with `{'input': '3'}`\u001b[0m\u001b[36;1m\u001b[1;3mSorry, there was an error. Please try again.\u001b[0m\u001b[32;1m\u001b[1;3m\u001b[0m\u001b[1m> Finished chain.\u001b[0m\n\n {'input': 'what is the value of magic_function(3)?', 'output': 'Agent stopped due to max iterations.'}\n\nWith LangGraph's react agent, you can control timeouts on two levels.\n\nYou can set a `step_timeout` to bound each **step**:\n\n from langgraph.prebuilt import create_react_agentapp = create_react_agent(model, tools=tools)# Set the max timeout for each step hereapp.step_timeout = 2try: for chunk in app.stream({\"messages\": [(\"human\", query)]}): print(chunk) print(\"------\")except TimeoutError: print({\"input\": query, \"output\": \"Agent stopped due to max iterations.\"})\n\n {'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_HaQkeCwD5QskzJzFixCBacZ4', 'function': {'arguments': '{\"input\":\"3\"}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 64, 'total_tokens': 78}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-596c9200-771f-436d-8576-72fcb81620f1-0', tool_calls=[{'name': 'magic_function', 'args': {'input': '3'}, 'id': 'call_HaQkeCwD5QskzJzFixCBacZ4'}])]}}------{'input': 'what is the value of magic_function(3)?', 'output': 'Agent stopped due to max iterations.'}\n\nThe other way to set a single max timeout for an entire run is to directly use the python stdlib [asyncio](https://docs.python.org/3/library/asyncio.html) library.\n\n import asynciofrom langgraph.prebuilt import create_react_agentapp = create_react_agent(model, tools=tools)async def stream(app, inputs): async for chunk in app.astream({\"messages\": [(\"human\", query)]}): print(chunk) print(\"------\")try: task = asyncio.create_task(stream(app, {\"messages\": [(\"human\", query)]})) await asyncio.wait_for(task, timeout=3)except TimeoutError: print(\"Task Cancelled.\")\n\n {'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_4agJXUHtmHrOOMogjF6ZuzAv', 'function': {'arguments': '{\"input\":\"3\"}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 64, 'total_tokens': 78}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-a1c77db7-405f-43d9-8d57-751f2ca1a58c-0', tool_calls=[{'name': 'magic_function', 'args': {'input': '3'}, 'id': 'call_4agJXUHtmHrOOMogjF6ZuzAv'}])]}}------Task Cancelled.\n\n`early_stopping_method`[​](#early_stopping_method \"Direct link to early_stopping_method\")\n-----------------------------------------------------------------------------------------\n\nWith LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could configure an 
[early\\_stopping\\_method](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.early_stopping_method) to either return a string saying \"Agent stopped due to iteration limit or time limit.\" (`\"force\"`) or prompt the LLM a final time to respond (`\"generate\"`).\n\n from langchain.agents import AgentExecutor, create_tool_calling_agentfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.tools import toolfrom langchain_openai import ChatOpenAImodel = ChatOpenAI(model=\"gpt-4o\")prompt = ChatPromptTemplate.from_messages( [ (\"system\", \"You are a helpful assistant.\"), (\"human\", \"{input}\"), # Placeholders fill up a **list** of messages (\"placeholder\", \"{agent_scratchpad}\"), ])@tooldef magic_function(input: int) -> int: \"\"\"Applies a magic function to an input.\"\"\" return \"Sorry there was an error, please try again.\"tools = [magic_function]agent = create_tool_calling_agent(model, tools, prompt=prompt)agent_executor = AgentExecutor( agent=agent, tools=tools, early_stopping_method=\"force\", max_iterations=1)result = agent_executor.invoke({\"input\": query})print(\"Output with early_stopping_method='force':\")print(result[\"output\"])\n\n**API Reference:**[AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html) | [create\\_tool\\_calling\\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.tool_calling_agent.base.create_tool_calling_agent.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)\n\n Output with early_stopping_method='force':Agent stopped due to max iterations.\n\n#### In LangGraph[​](#in-langgraph-2 \"Direct link to In LangGraph\")\n\nIn LangGraph, you can explicitly handle the response behavior outside the agent, since the full state can be accessed.\n\n from langgraph.errors import GraphRecursionErrorfrom langgraph.prebuilt import create_react_agentRECURSION_LIMIT = 2 * 1 + 1app = create_react_agent(model, tools=tools)try: for chunk in app.stream( {\"messages\": [(\"human\", query)]}, {\"recursion_limit\": RECURSION_LIMIT}, stream_mode=\"values\", ): print(chunk[\"messages\"][-1])except GraphRecursionError: print({\"input\": query, \"output\": \"Agent stopped due to max iterations.\"})\n\n ('human', 'what is the value of magic_function(3)?')content='' additional_kwargs={'tool_calls': [{'id': 'call_bTURmOn9C8zslmn0kMFeykIn', 'function': {'arguments': '{\"input\":3}', 'name': 'magic_function'}, 'type': 'function'}]} response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 64, 'total_tokens': 78}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None} id='run-0844a504-7e6b-4ea6-a069-7017e38121ee-0' tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_bTURmOn9C8zslmn0kMFeykIn'}]content='Sorry there was an error, please try again.' 
name='magic_function' id='00d5386f-eb23-4628-9a29-d9ce6a7098cc' tool_call_id='call_bTURmOn9C8zslmn0kMFeykIn'content='' additional_kwargs={'tool_calls': [{'id': 'call_JYqvvvWmXow2u012DuPoDHFV', 'function': {'arguments': '{\"input\":3}', 'name': 'magic_function'}, 'type': 'function'}]} response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 96, 'total_tokens': 110}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None} id='run-b73b1b1c-c829-4348-98cd-60b315c85448-0' tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_JYqvvvWmXow2u012DuPoDHFV'}]{'input': 'what is the value of magic_function(3)?', 'output': 'Agent stopped due to max iterations.'}\n\n`trim_intermediate_steps`[​](#trim_intermediate_steps \"Direct link to trim_intermediate_steps\")\n-----------------------------------------------------------------------------------------------\n\nWith LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor), you could trim the intermediate steps of long-running agents using [trim\\_intermediate\\_steps](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.trim_intermediate_steps), which is either an integer (indicating the agent should keep the last N steps) or a custom function.\n\nFor instance, we could trim the value so the agent only sees the most recent intermediate step.\n\n from langchain.agents import AgentExecutor, create_tool_calling_agentfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.tools import toolfrom langchain_openai import ChatOpenAImodel = ChatOpenAI(model=\"gpt-4o\")prompt = ChatPromptTemplate.from_messages( [ (\"system\", \"You are a helpful assistant.\"), (\"human\", \"{input}\"), # Placeholders fill up a **list** of messages (\"placeholder\", \"{agent_scratchpad}\"), ])magic_step_num = 1@tooldef magic_function(input: int) -> int: \"\"\"Applies a magic function to an input.\"\"\" global magic_step_num print(f\"Call number: {magic_step_num}\") magic_step_num += 1 return input + magic_step_numtools = [magic_function]agent = create_tool_calling_agent(model, tools, prompt=prompt)def trim_steps(steps: list): # Let's give the agent amnesia return []agent_executor = AgentExecutor( agent=agent, tools=tools, trim_intermediate_steps=trim_steps)query = \"Call the magic function 4 times in sequence with the value 3. 
You cannot call it multiple times at once.\"for step in agent_executor.stream({\"input\": query}): pass\n\n**API Reference:**[AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html) | [create\\_tool\\_calling\\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.tool_calling_agent.base.create_tool_calling_agent.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)\n\n Call number: 1Call number: 2Call number: 3Call number: 4Call number: 5Call number: 6Call number: 7Call number: 8Call number: 9Call number: 10Call number: 11Call number: 12Call number: 13Call number: 14``````outputStopping agent prematurely due to triggering stop condition``````outputCall number: 15\n\n#### In LangGraph[​](#in-langgraph-3 \"Direct link to In LangGraph\")\n\nWe can use the [`messages_modifier`](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) just as before when passing in [prompt templates](#prompt-templates).\n\n from langchain_core.messages import AnyMessagefrom langgraph.errors import GraphRecursionErrorfrom langgraph.prebuilt import create_react_agentmagic_step_num = 1@tooldef magic_function(input: int) -> int: \"\"\"Applies a magic function to an input.\"\"\" global magic_step_num print(f\"Call number: {magic_step_num}\") magic_step_num += 1 return input + magic_step_numtools = [magic_function]def _modify_messages(messages: list[AnyMessage]): # Give the agent amnesia, only keeping the original user query return [(\"system\", \"You are a helpful assistant\"), messages[0]]app = create_react_agent(model, tools, messages_modifier=_modify_messages)try: for step in app.stream({\"messages\": [(\"human\", query)]}, stream_mode=\"updates\"): passexcept GraphRecursionError as e: print(\"Stopping agent prematurely due to triggering stop condition\")\n\n Call number: 1Call number: 2Call number: 3Call number: 4Call number: 5Call number: 6Call number: 7Call number: 8Call number: 9Call number: 10Call number: 11Call number: 12Stopping agent prematurely due to triggering stop condition\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/migrate_agent.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to add message history\n\n](/v0.2/docs/how_to/message_history/)[\n\nNext\n\nHow to retrieve using multiple vectors per document\n\n](/v0.2/docs/how_to/multi_vector/)\n\n* [Basic Usage](#basic-usage)\n* [Prompt Templates](#prompt-templates)\n* [Memory](#memory)\n* [Iterating through steps](#iterating-through-steps)\n* [`return_intermediate_steps`](#return_intermediate_steps)\n* [`max_iterations`](#max_iterations)\n* [`max_execution_time`](#max_execution_time)\n* [`early_stopping_method`](#early_stopping_method)\n* 
[`trim_intermediate_steps`](#trim_intermediate_steps)"},"last_modified":{"kind":"null"}}},{"rowIdx":1395,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/multimodal_inputs/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [How-to guides](/v0.2/docs/how_to/)\n* How to pass multimodal data directly to models\n\nOn this page\n\nHow to pass multimodal data directly to models\n==============================================\n\nHere we demonstrate how to pass multimodal input directly to models. We currently expect all input to be passed in the same format as [OpenAI expects](https://platform.openai.com/docs/guides/vision). For other model providers that support multimodal input, we have added logic inside the class to convert to the expected format.\n\nIn this example we will ask a model to describe an image.\n\n image_url = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\"\n\n from langchain_core.messages import HumanMessagefrom langchain_openai import ChatOpenAImodel = ChatOpenAI(model=\"gpt-4o\")\n\n**API Reference:**[HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)\n\nThe most commonly supported way to pass in images is to pass it in as a byte string. This should work for most model integrations.\n\n import base64import httpximage_data = base64.b64encode(httpx.get(image_url).content).decode(\"utf-8\")\n\n message = HumanMessage( content=[ {\"type\": \"text\", \"text\": \"describe the weather in this image\"}, { \"type\": \"image_url\", \"image_url\": {\"url\": f\"data:image/jpeg;base64,{image_data}\"}, }, ],)response = model.invoke([message])print(response.content)\n\n The weather in the image appears to be clear and pleasant. The sky is mostly blue with scattered, light clouds, suggesting a sunny day with minimal cloud cover. There is no indication of rain or strong winds, and the overall scene looks bright and calm. The lush green grass and clear visibility further indicate good weather conditions.\n\nWe can feed the image URL directly in a content block of type \"image\\_url\". Note that only some model providers support this.\n\n message = HumanMessage( content=[ {\"type\": \"text\", \"text\": \"describe the weather in this image\"}, {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}}, ],)response = model.invoke([message])print(response.content)\n\n The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered clouds, suggesting good visibility and a likely pleasant temperature. The bright sunlight is casting distinct shadows on the grass and vegetation, indicating it is likely daytime, possibly late morning or early afternoon. The overall ambiance suggests a warm and inviting day, suitable for outdoor activities.\n\nWe can also pass in multiple images.\n\n message = HumanMessage( content=[ {\"type\": \"text\", \"text\": \"are these two images the same?\"}, {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}}, {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}}, ],)response = model.invoke([message])print(response.content)\n\n Yes, the two images are the same. They both depict a wooden boardwalk extending through a grassy field under a blue sky with light clouds. 
The scenery, lighting, and composition are identical.\n\nTool calls[​](#tool-calls \"Direct link to Tool calls\")\n------------------------------------------------------\n\nSome multimodal models support [tool calling](/v0.2/docs/concepts/#functiontool-calling) features as well. To call tools using such models, simply bind tools to them in the [usual way](/v0.2/docs/how_to/tool_calling/), and invoke the model using content blocks of the desired type (e.g., containing image data).\n\n from typing import Literalfrom langchain_core.tools import tool@tooldef weather_tool(weather: Literal[\"sunny\", \"cloudy\", \"rainy\"]) -> None: \"\"\"Describe the weather\"\"\" passmodel_with_tools = model.bind_tools([weather_tool])message = HumanMessage( content=[ {\"type\": \"text\", \"text\": \"describe the weather in this image\"}, {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}}, ],)response = model_with_tools.invoke([message])print(response.tool_calls)\n\n**API Reference:**[tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html)\n\n [{'name': 'weather_tool', 'args': {'weather': 'sunny'}, 'id': 'call_BSX4oq4SKnLlp2WlzDhToHBr'}]\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/multimodal_inputs.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nHow to retrieve using multiple vectors per document\n\n](/v0.2/docs/how_to/multi_vector/)[\n\nNext\n\nHow to use multimodal prompts\n\n](/v0.2/docs/how_to/multimodal_prompts/)\n\n* [Tool calls](#tool-calls)"},"last_modified":{"kind":"null"}}},{"rowIdx":1396,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/tutorials/query_analysis/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* [Tutorials](/v0.2/docs/tutorials/)\n* Build a Query Analysis System\n\nOn this page\n\nBuild a Query Analysis System\n=============================\n\nPrerequisites\n\nThis guide assumes familiarity with the following concepts:\n\n* [Document loaders](/v0.2/docs/concepts/#document-loaders)\n* [Chat models](/v0.2/docs/concepts/#chat-models)\n* [Embeddings](/v0.2/docs/concepts/#embedding-models)\n* [Vector stores](/v0.2/docs/concepts/#vector-stores)\n* [Retrieval](/v0.2/docs/concepts/#retrieval)\n\nThis page will show how to use query analysis in a basic end-to-end example. This will cover creating a simple search engine, showing a failure mode that occurs when passing a raw user question to that search, and then an example of how query analysis can help address that issue. 
There are MANY different query analysis techniques and this end-to-end example will not show all of them.\n\nFor the purpose of this example, we will do retrieval over the LangChain YouTube videos.\n\nSetup[​](#setup \"Direct link to Setup\")\n---------------------------------------\n\n#### Install dependencies[​](#install-dependencies \"Direct link to Install dependencies\")\n\n # %pip install -qU langchain langchain-community langchain-openai youtube-transcript-api pytube langchain-chroma\n\n#### Set environment variables[​](#set-environment-variables \"Direct link to Set environment variables\")\n\nWe'll use OpenAI in this example:\n\n import getpassimport osos.environ[\"OPENAI_API_KEY\"] = getpass.getpass()# Optional, uncomment to trace runs with LangSmith. Sign up here: https://smith.langchain.com.# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()\n\n### Load documents[​](#load-documents \"Direct link to Load documents\")\n\nWe can use the `YouTubeLoader` to load transcripts of a few LangChain videos:\n\n from langchain_community.document_loaders import YoutubeLoaderurls = [ \"https://www.youtube.com/watch?v=HAn9vnJy6S4\", \"https://www.youtube.com/watch?v=dA1cHGACXCo\", \"https://www.youtube.com/watch?v=ZcEMLz27sL4\", \"https://www.youtube.com/watch?v=hvAPnpSfSGo\", \"https://www.youtube.com/watch?v=EhlPDL4QrWY\", \"https://www.youtube.com/watch?v=mmBo8nlu2j0\", \"https://www.youtube.com/watch?v=rQdibOsL1ps\", \"https://www.youtube.com/watch?v=28lC4fqukoc\", \"https://www.youtube.com/watch?v=es-9MgxB-uc\", \"https://www.youtube.com/watch?v=wLRHwKuKvOE\", \"https://www.youtube.com/watch?v=ObIltMaRJvY\", \"https://www.youtube.com/watch?v=DjuXACWYkkU\", \"https://www.youtube.com/watch?v=o7C9ld6Ln-M\",]docs = []for url in urls: docs.extend(YoutubeLoader.from_youtube_url(url, add_video_info=True).load())\n\n**API Reference:**[YoutubeLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.youtube.YoutubeLoader.html)\n\n import datetime# Add some additional metadata: what year the video was publishedfor doc in docs: doc.metadata[\"publish_year\"] = int( datetime.datetime.strptime( doc.metadata[\"publish_date\"], \"%Y-%m-%d %H:%M:%S\" ).strftime(\"%Y\") )\n\nHere are the titles of the videos we've loaded:\n\n [doc.metadata[\"title\"] for doc in docs]\n\n ['OpenGPTs', 'Building a web RAG chatbot: using LangChain, Exa (prev. Metaphor), LangSmith, and Hosted Langserve', 'Streaming Events: Introducing a new `stream_events` method', 'LangGraph: Multi-Agent Workflows', 'Build and Deploy a RAG app with Pinecone Serverless', 'Auto-Prompt Builder (with Hosted LangServe)', 'Build a Full Stack RAG App With TypeScript', 'Getting Started with Multi-Modal LLMs', 'SQL Research Assistant', 'Skeleton-of-Thought: Building a New Template from Scratch', 'Benchmarking RAG over LangChain Docs', 'Building a Research Assistant from Scratch', 'LangServe and LangChain Templates Webinar']\n\nHere's the metadata associated with each video. 
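(As an aside, the `publish_year` value we just attached can be computed a bit more directly from the parsed datetime; a minimal equivalent sketch:)

    # Equivalent to the strptime(...).strftime("%Y") round-trip above:
    # the parsed datetime already exposes the year as an integer.
    for doc in docs:
        doc.metadata["publish_year"] = datetime.datetime.strptime(
            doc.metadata["publish_date"], "%Y-%m-%d %H:%M:%S"
        ).year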
We can see that each document also has a title, view count, publication date, and length:\n\n docs[0].metadata\n\n {'source': 'HAn9vnJy6S4', 'title': 'OpenGPTs', 'description': 'Unknown', 'view_count': 7210, 'thumbnail_url': 'https://i.ytimg.com/vi/HAn9vnJy6S4/hq720.jpg', 'publish_date': '2024-01-31 00:00:00', 'length': 1530, 'author': 'LangChain', 'publish_year': 2024}\n\nAnd here's a sample from a document's contents:\n\n docs[0].page_content[:500]\n\n \"hello today I want to talk about open gpts open gpts is a project that we built here at linkchain uh that replicates the GPT store in a few ways so it creates uh end user-facing friendly interface to create different Bots and these Bots can have access to different tools and they can uh be given files to retrieve things over and basically it's a way to create a variety of bots and expose the configuration of these Bots to end users it's all open source um it can be used with open AI it can be us\"\n\n### Indexing documents[​](#indexing-documents \"Direct link to Indexing documents\")\n\nWhenever we perform retrieval we need to create an index of documents that we can query. We'll use a vector store to index our documents, and we'll chunk them first to make our retrievals more concise and precise:\n\n from langchain_chroma import Chromafrom langchain_openai import OpenAIEmbeddingsfrom langchain_text_splitters import RecursiveCharacterTextSplittertext_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)chunked_docs = text_splitter.split_documents(docs)embeddings = OpenAIEmbeddings(model=\"text-embedding-3-small\")vectorstore = Chroma.from_documents( chunked_docs, embeddings,)\n\n**API Reference:**[OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) | [RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html)\n\nRetrieval without query analysis[​](#retrieval-without-query-analysis \"Direct link to Retrieval without query analysis\")\n------------------------------------------------------------------------------------------------------------------------\n\nWe can perform similarity search on a user question directly to find chunks relevant to the question:\n\n search_results = vectorstore.similarity_search(\"how do I build a RAG agent\")print(search_results[0].metadata[\"title\"])print(search_results[0].page_content[:500])\n\n Build and Deploy a RAG app with Pinecone Serverlesshi this is Lance from the Lang chain team and today we're going to be building and deploying a rag app using pine con serval list from scratch so we're going to kind of walk through all the code required to do this and I'll use these slides as kind of a guide to kind of lay the the ground work um so first what is rag so under capoy has this pretty nice visualization that shows LMS as a kernel of a new kind of operating system and of course one of the core components of our operating system is th\n\nThis works pretty well! 
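If you want a rough sense of how well, most vector stores can also return a score with each hit. A quick sketch using Chroma's scored search (for Chroma this is a distance, so lower generally means a closer match):

    # Sketch: inspect scores to judge retrieval quality. Returns (Document, score) pairs.
    for doc, score in vectorstore.similarity_search_with_score("how do I build a RAG agent"):
        print(round(score, 3), doc.metadata["title"])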
Our first result is quite relevant to the question.\n\nWhat if we wanted to search for results from a specific time period?\n\n search_results = vectorstore.similarity_search(\"videos on RAG published in 2023\")print(search_results[0].metadata[\"title\"])print(search_results[0].metadata[\"publish_date\"])print(search_results[0].page_content[:500])\n\n OpenGPTs2024-01-31hardcoded that it will always do a retrieval step here the assistant decides whether to do a retrieval step or not sometimes this is good sometimes this is bad sometimes it you don't need to do a retrieval step when I said hi it didn't need to call it tool um but other times you know the the llm might mess up and not realize that it needs to do a retrieval step and so the rag bot will always do a retrieval step so it's more focused there because this is also a simpler architecture so it's always\n\nOur first result is from 2024 (despite us asking for videos from 2023), and not very relevant to the input. Since we're just searching against document contents, there's no way for the results to be filtered on any document attributes.\n\nThis is just one failure mode that can arise. Let's now take a look at how a basic form of query analysis can fix it!\n\nQuery analysis[​](#query-analysis \"Direct link to Query analysis\")\n------------------------------------------------------------------\n\nWe can use query analysis to improve the results of retrieval. This will involve defining a **query schema** that contains some date filters and use a function-calling model to convert a user question into a structured queries.\n\n### Query schema[​](#query-schema \"Direct link to Query schema\")\n\nIn this case we'll have explicit min and max attributes for publication date so that it can be filtered on.\n\n from typing import Optionalfrom langchain_core.pydantic_v1 import BaseModel, Fieldclass Search(BaseModel): \"\"\"Search over a database of tutorial videos about a software library.\"\"\" query: str = Field( ..., description=\"Similarity search query applied to video transcripts.\", ) publish_year: Optional[int] = Field(None, description=\"Year video was published\")\n\n### Query generation[​](#query-generation \"Direct link to Query generation\")\n\nTo convert user questions to structured queries we'll make use of OpenAI's tool-calling API. Specifically we'll use the new [ChatModel.with\\_structured\\_output()](/v0.2/docs/how_to/structured_output/) constructor to handle passing the schema to the model and parsing the output.\n\n from langchain_core.prompts import ChatPromptTemplatefrom langchain_core.runnables import RunnablePassthroughfrom langchain_openai import ChatOpenAIsystem = \"\"\"You are an expert at converting user questions into database queries. \\You have access to a database of tutorial videos about a software library for building LLM-powered applications. 
\\Given a question, return a list of database queries optimized to retrieve the most relevant results.If there are acronyms or words you are not familiar with, do not try to rephrase them.\"\"\"prompt = ChatPromptTemplate.from_messages( [ (\"system\", system), (\"human\", \"{question}\"), ])llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)structured_llm = llm.with_structured_output(Search)query_analyzer = {\"question\": RunnablePassthrough()} | prompt | structured_llm\n\n**API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)\n\n /Users/bagatur/langchain/libs/core/langchain_core/_api/beta_decorator.py:86: LangChainBetaWarning: The function `with_structured_output` is in beta. It is actively being worked on, so the API may change. warn_beta(\n\nLet's see what queries our analyzer generates for the questions we searched earlier:\n\n query_analyzer.invoke(\"how do I build a RAG agent\")\n\n Search(query='build RAG agent', publish_year=None)\n\n query_analyzer.invoke(\"videos on RAG published in 2023\")\n\n Search(query='RAG', publish_year=2023)\n\nRetrieval with query analysis[​](#retrieval-with-query-analysis \"Direct link to Retrieval with query analysis\")\n---------------------------------------------------------------------------------------------------------------\n\nOur query analysis looks pretty good; now let's try using our generated queries to actually perform retrieval.\n\n**Note:** in our example, we specified `tool_choice=\"Search\"`. This will force the LLM to call one - and only one - tool, meaning that we will always have one optimized query to look up. Note that this is not always the case - see other guides for how to deal with situations when no - or multiple - optmized queries are returned.\n\n from typing import Listfrom langchain_core.documents import Document\n\n**API Reference:**[Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html)\n\n def retrieval(search: Search) -> List[Document]: if search.publish_year is not None: # This is syntax specific to Chroma, # the vector database we are using. 
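        # Chroma metadata filters use a MongoDB-style operator syntax
        # (for example {"publish_year": {"$eq": 2023}}); Chroma also accepts
        # comparison operators such as "$gt", "$lt" and "$in", plus logical
        # "$and"/"$or", in these filters.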
_filter = {\"publish_year\": {\"$eq\": search.publish_year}} else: _filter = None return vectorstore.similarity_search(search.query, filter=_filter)\n\n retrieval_chain = query_analyzer | retrieval\n\nWe can now run this chain on the problematic input from before, and see that it yields only results from that year!\n\n results = retrieval_chain.invoke(\"RAG tutorial published in 2023\")\n\n [(doc.metadata[\"title\"], doc.metadata[\"publish_date\"]) for doc in results]\n\n [('Getting Started with Multi-Modal LLMs', '2023-12-20 00:00:00'), ('LangServe and LangChain Templates Webinar', '2023-11-02 00:00:00'), ('Getting Started with Multi-Modal LLMs', '2023-12-20 00:00:00'), ('Building a Research Assistant from Scratch', '2023-11-16 00:00:00')]\n\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/tutorials/query_analysis.ipynb)\n\n* * *\n\n#### Was this page helpful?\n\n \n\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).\n\n[\n\nPrevious\n\nBuild a Simple LLM Application with LCEL\n\n](/v0.2/docs/tutorials/llm_chain/)[\n\nNext\n\nBuild a Chatbot\n\n](/v0.2/docs/tutorials/chatbot/)\n\n* [Setup](#setup)\n * [Load documents](#load-documents)\n * [Indexing documents](#indexing-documents)\n* [Retrieval without query analysis](#retrieval-without-query-analysis)\n* [Query analysis](#query-analysis)\n * [Query schema](#query-schema)\n * [Query generation](#query-generation)\n* [Retrieval with query analysis](#retrieval-with-query-analysis)"},"last_modified":{"kind":"null"}}},{"rowIdx":1397,"cells":{"url":{"kind":"string","value":"https://python.langchain.com/v0.2/docs/how_to/"},"markdown":{"kind":"string","value":"* [](/v0.2/)\n* How-to guides\n\nOn this page\n\nHow-to guides\n=============\n\nHere you’ll find answers to “How do I….?” types of questions. These guides are _goal-oriented_ and _concrete_; they're meant to help you complete a specific task. For conceptual explanations see the [Conceptual guide](/v0.2/docs/concepts/). For end-to-end walkthroughs see [Tutorials](/v0.2/docs/tutorials/). 
For comprehensive descriptions of every class and function see the [API Reference](https://api.python.langchain.com/en/latest/).\n\nInstallation[​](#installation \"Direct link to Installation\")\n------------------------------------------------------------\n\n* [How to: install LangChain packages](/v0.2/docs/how_to/installation/)\n* [How to: use LangChain with different Pydantic versions](/v0.2/docs/how_to/pydantic_compatibility/)\n\nKey features[​](#key-features \"Direct link to Key features\")\n------------------------------------------------------------\n\nThis highlights functionality that is core to using LangChain.\n\n* [How to: return structured data from a model](/v0.2/docs/how_to/structured_output/)\n* [How to: use a model to call tools](/v0.2/docs/how_to/tool_calling/)\n* [How to: stream runnables](/v0.2/docs/how_to/streaming/)\n* [How to: debug your LLM apps](/v0.2/docs/how_to/debugging/)\n\nLangChain Expression Language (LCEL)[​](#langchain-expression-language-lcel \"Direct link to LangChain Expression Language (LCEL)\")\n----------------------------------------------------------------------------------------------------------------------------------\n\n[LangChain Expression Language](/v0.2/docs/concepts/#langchain-expression-language-lcel) is a way to create arbitrary custom chains. It is built on the [Runnable](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) protocol.\n\n[**LCEL cheatsheet**](/v0.2/docs/how_to/lcel_cheatsheet/): For a quick overview of how to use the main LCEL primitives.\n\n* [How to: chain runnables](/v0.2/docs/how_to/sequence/)\n* [How to: stream runnables](/v0.2/docs/how_to/streaming/)\n* [How to: invoke runnables in parallel](/v0.2/docs/how_to/parallel/)\n* [How to: add default invocation args to runnables](/v0.2/docs/how_to/binding/)\n* [How to: turn any function into a runnable](/v0.2/docs/how_to/functions/)\n* [How to: pass through inputs from one chain step to the next](/v0.2/docs/how_to/passthrough/)\n* [How to: configure runnable behavior at runtime](/v0.2/docs/how_to/configure/)\n* [How to: add message history (memory) to a chain](/v0.2/docs/how_to/message_history/)\n* [How to: route between sub-chains](/v0.2/docs/how_to/routing/)\n* [How to: create a dynamic (self-constructing) chain](/v0.2/docs/how_to/dynamic_chain/)\n* [How to: inspect runnables](/v0.2/docs/how_to/inspect/)\n* [How to: add fallbacks to a runnable](/v0.2/docs/how_to/fallbacks/)\n\nComponents[​](#components \"Direct link to Components\")\n------------------------------------------------------\n\nThese are the core building blocks you can use when building applications.\n\n### Prompt templates[​](#prompt-templates \"Direct link to Prompt templates\")\n\n[Prompt Templates](/v0.2/docs/concepts/#prompt-templates) are responsible for formatting user input into a format that can be passed to a language model.\n\n* [How to: use few shot examples](/v0.2/docs/how_to/few_shot_examples/)\n* [How to: use few shot examples in chat models](/v0.2/docs/how_to/few_shot_examples_chat/)\n* [How to: partially format prompt templates](/v0.2/docs/how_to/prompts_partial/)\n* [How to: compose prompts together](/v0.2/docs/how_to/prompts_composition/)\n\n### Example selectors[​](#example-selectors \"Direct link to Example selectors\")\n\n[Example Selectors](/v0.2/docs/concepts/#example-selectors) are responsible for selecting the correct few shot examples to pass to the prompt.\n\n* [How to: use example selectors](/v0.2/docs/how_to/example_selectors/)\n* 
[How to: select examples by length](/v0.2/docs/how_to/example_selectors_length_based/)\n* [How to: select examples by semantic similarity](/v0.2/docs/how_to/example_selectors_similarity/)\n* [How to: select examples by semantic ngram overlap](/v0.2/docs/how_to/example_selectors_ngram/)\n* [How to: select examples by maximal marginal relevance](/v0.2/docs/how_to/example_selectors_mmr/)\n\n### Chat models[​](#chat-models \"Direct link to Chat models\")\n\n[Chat Models](/v0.2/docs/concepts/#chat-models) are newer forms of language models that take messages in and output a message.\n\n* [How to: do function/tool calling](/v0.2/docs/how_to/tool_calling/)\n* [How to: get models to return structured output](/v0.2/docs/how_to/structured_output/)\n* [How to: cache model responses](/v0.2/docs/how_to/chat_model_caching/)\n* [How to: get log probabilities](/v0.2/docs/how_to/logprobs/)\n* [How to: create a custom chat model class](/v0.2/docs/how_to/custom_chat_model/)\n* [How to: stream a response back](/v0.2/docs/how_to/chat_streaming/)\n* [How to: track token usage](/v0.2/docs/how_to/chat_token_usage_tracking/)\n* [How to: track response metadata across providers](/v0.2/docs/how_to/response_metadata/)\n* [How to: let your end users choose their model](/v0.2/docs/how_to/chat_models_universal_init/)\n* [How to: use chat model to call tools](/v0.2/docs/how_to/tool_calling/)\n* [How to: stream tool calls](/v0.2/docs/how_to/tool_streaming/)\n* [How to: few shot prompt tool behavior](/v0.2/docs/how_to/tools_few_shot/)\n* [How to: bind model-specific formated tools](/v0.2/docs/how_to/tools_model_specific/)\n* [How to: force specific tool call](/v0.2/docs/how_to/tool_choice/)\n* [How to: init any model in one line](/v0.2/docs/how_to/chat_models_universal_init/)\n\n### Messages[​](#messages \"Direct link to Messages\")\n\n[Messages](/v0.2/docs/concepts/#messages) are the input and output of chat models. 
They have some `content` and a `role`, which describes the source of the message.\n\n* [How to: trim messages](/v0.2/docs/how_to/trim_messages/)\n* [How to: filter messages](/v0.2/docs/how_to/filter_messages/)\n* [How to: merge consecutive messages of the same type](/v0.2/docs/how_to/merge_message_runs/)\n\n### LLMs[​](#llms \"Direct link to LLMs\")\n\nWhat LangChain calls [LLMs](/v0.2/docs/concepts/#llms) are older forms of language models that take a string in and output a string.\n\n* [How to: cache model responses](/v0.2/docs/how_to/llm_caching/)\n* [How to: create a custom LLM class](/v0.2/docs/how_to/custom_llm/)\n* [How to: stream a response back](/v0.2/docs/how_to/streaming_llm/)\n* [How to: track token usage](/v0.2/docs/how_to/llm_token_usage_tracking/)\n* [How to: work with local LLMs](/v0.2/docs/how_to/local_llms/)\n\n### Output parsers[​](#output-parsers \"Direct link to Output parsers\")\n\n[Output Parsers](/v0.2/docs/concepts/#output-parsers) are responsible for taking the output of an LLM and parsing into more structured format.\n\n* [How to: use output parsers to parse an LLM response into structured format](/v0.2/docs/how_to/output_parser_structured/)\n* [How to: parse JSON output](/v0.2/docs/how_to/output_parser_json/)\n* [How to: parse XML output](/v0.2/docs/how_to/output_parser_xml/)\n* [How to: parse YAML output](/v0.2/docs/how_to/output_parser_yaml/)\n* [How to: retry when output parsing errors occur](/v0.2/docs/how_to/output_parser_retry/)\n* [How to: try to fix errors in output parsing](/v0.2/docs/how_to/output_parser_fixing/)\n* [How to: write a custom output parser class](/v0.2/docs/how_to/output_parser_custom/)\n\n### Document loaders[​](#document-loaders \"Direct link to Document loaders\")\n\n[Document Loaders](/v0.2/docs/concepts/#document-loaders) are responsible for loading documents from a variety of sources.\n\n* [How to: load CSV data](/v0.2/docs/how_to/document_loader_csv/)\n* [How to: load data from a directory](/v0.2/docs/how_to/document_loader_directory/)\n* [How to: load HTML data](/v0.2/docs/how_to/document_loader_html/)\n* [How to: load JSON data](/v0.2/docs/how_to/document_loader_json/)\n* [How to: load Markdown data](/v0.2/docs/how_to/document_loader_markdown/)\n* [How to: load Microsoft Office data](/v0.2/docs/how_to/document_loader_office_file/)\n* [How to: load PDF files](/v0.2/docs/how_to/document_loader_pdf/)\n* [How to: write a custom document loader](/v0.2/docs/how_to/document_loader_custom/)\n\n### Text splitters[​](#text-splitters \"Direct link to Text splitters\")\n\n[Text Splitters](/v0.2/docs/concepts/#text-splitters) take a document and split into chunks that can be used for retrieval.\n\n* [How to: recursively split text](/v0.2/docs/how_to/recursive_text_splitter/)\n* [How to: split by HTML headers](/v0.2/docs/how_to/HTML_header_metadata_splitter/)\n* [How to: split by HTML sections](/v0.2/docs/how_to/HTML_section_aware_splitter/)\n* [How to: split by character](/v0.2/docs/how_to/character_text_splitter/)\n* [How to: split code](/v0.2/docs/how_to/code_splitter/)\n* [How to: split Markdown by headers](/v0.2/docs/how_to/markdown_header_metadata_splitter/)\n* [How to: recursively split JSON](/v0.2/docs/how_to/recursive_json_splitter/)\n* [How to: split text into semantic chunks](/v0.2/docs/how_to/semantic-chunker/)\n* [How to: split by tokens](/v0.2/docs/how_to/split_by_token/)\n\n### Embedding models[​](#embedding-models \"Direct link to Embedding models\")\n\n[Embedding Models](/v0.2/docs/concepts/#embedding-models) take a piece of 
text and create a numerical representation of it.\n\n* [How to: embed text data](/v0.2/docs/how_to/embed_text/)\n* [How to: cache embedding results](/v0.2/docs/how_to/caching_embeddings/)\n\n### Vector stores[​](#vector-stores \"Direct link to Vector stores\")\n\n[Vector stores](/v0.2/docs/concepts/#vector-stores) are databases that can efficiently store and retrieve embeddings.\n\n* [How to: use a vector store to retrieve data](/v0.2/docs/how_to/vectorstores/)\n\n### Retrievers[​](#retrievers \"Direct link to Retrievers\")\n\n[Retrievers](/v0.2/docs/concepts/#retrievers) are responsible for taking a query and returning relevant documents.\n\n* [How to: use a vector store to retrieve data](/v0.2/docs/how_to/vectorstore_retriever/)\n* [How to: generate multiple queries to retrieve data for](/v0.2/docs/how_to/MultiQueryRetriever/)\n* [How to: use contextual compression to compress the data retrieved](/v0.2/docs/how_to/contextual_compression/)\n* [How to: write a custom retriever class](/v0.2/docs/how_to/custom_retriever/)\n* [How to: add similarity scores to retriever results](/v0.2/docs/how_to/add_scores_retriever/)\n* [How to: combine the results from multiple retrievers](/v0.2/docs/how_to/ensemble_retriever/)\n* [How to: reorder retrieved results to mitigate the \"lost in the middle\" effect](/v0.2/docs/how_to/long_context_reorder/)\n* [How to: generate multiple embeddings per document](/v0.2/docs/how_to/multi_vector/)\n* [How to: retrieve the whole document for a chunk](/v0.2/docs/how_to/parent_document_retriever/)\n* [How to: generate metadata filters](/v0.2/docs/how_to/self_query/)\n* [How to: create a time-weighted retriever](/v0.2/docs/how_to/time_weighted_vectorstore/)\n* [How to: use hybrid vector and keyword retrieval](/v0.2/docs/how_to/hybrid/)\n\n### Indexing[​](#indexing \"Direct link to Indexing\")\n\nIndexing is the process of keeping your vectorstore in-sync with the underlying data source.\n\n* [How to: reindex data to keep your vectorstore in-sync with the underlying data source](/v0.2/docs/how_to/indexing/)\n\n### Tools[​](#tools \"Direct link to Tools\")\n\nLangChain [Tools](/v0.2/docs/concepts/#tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call.\n\n* [How to: create custom tools](/v0.2/docs/how_to/custom_tools/)\n* [How to: use built-in tools and built-in toolkits](/v0.2/docs/how_to/tools_builtin/)\n* [How to: use chat model to call tools](/v0.2/docs/how_to/tool_calling/)\n* [How to: pass tool results back to model](/v0.2/docs/how_to/tool_results_pass_to_model/)\n* [How to: add ad-hoc tool calling capability to LLMs and chat models](/v0.2/docs/how_to/tools_prompting/)\n* [How to: pass run time values to tools](/v0.2/docs/how_to/tool_runtime/)\n* [How to: add a human in the loop to tool usage](/v0.2/docs/how_to/tools_human/)\n* [How to: handle errors when calling tools](/v0.2/docs/how_to/tools_error/)\n* [How to: disable parallel tool calling](/v0.2/docs/how_to/tool_choice/)\n\n### Multimodal[​](#multimodal \"Direct link to Multimodal\")\n\n* [How to: pass multimodal data directly to models](/v0.2/docs/how_to/multimodal_inputs/)\n* [How to: use multimodal prompts](/v0.2/docs/how_to/multimodal_prompts/)\n\n### Agents[​](#agents \"Direct link to Agents\")\n\nnote\n\nFor in depth how-to guides for agents, please check out [LangGraph](https://github.com/langchain-ai/langgraph) documentation.\n\n* [How to: use legacy LangChain Agents (AgentExecutor)](/v0.2/docs/how_to/agent_executor/)\n* [How to: 
migrate from legacy LangChain agents to LangGraph](/v0.2/docs/how_to/migrate_agent/)\n\n### Callbacks[​](#callbacks \"Direct link to Callbacks\")\n\n[Callbacks](/v0.2/docs/concepts/#callbacks) allow you to hook into the various stages of your LLM application's execution.\n\n* [How to: pass in callbacks at runtime](/v0.2/docs/how_to/callbacks_runtime/)\n* [How to: attach callbacks to a module](/v0.2/docs/how_to/callbacks_attach/)\n* [How to: pass callbacks into a module constructor](/v0.2/docs/how_to/callbacks_constructor/)\n* [How to: create custom callback handlers](/v0.2/docs/how_to/custom_callbacks/)\n* [How to: use callbacks in async environments](/v0.2/docs/how_to/callbacks_async/)\n\n### Custom[​](#custom \"Direct link to Custom\")\n\nAll of LangChain components can easily be extended to support your own versions.\n\n* [How to: create a custom chat model class](/v0.2/docs/how_to/custom_chat_model/)\n* [How to: create a custom LLM class](/v0.2/docs/how_to/custom_llm/)\n* [How to: write a custom retriever class](/v0.2/docs/how_to/custom_retriever/)\n* [How to: write a custom document loader](/v0.2/docs/how_to/document_loader_custom/)\n* [How to: write a custom output parser class](/v0.2/docs/how_to/output_parser_custom/)\n* [How to: create custom callback handlers](/v0.2/docs/how_to/custom_callbacks/)\n* [How to: define a custom tool](/v0.2/docs/how_to/custom_tools/)\n\n### Serialization[​](#serialization \"Direct link to Serialization\")\n\n* [How to: save and load LangChain objects](/v0.2/docs/how_to/serialization/)\n\nUse cases[​](#use-cases \"Direct link to Use cases\")\n---------------------------------------------------\n\nThese guides cover use-case specific details.\n\n### Q&A with RAG[​](#qa-with-rag \"Direct link to Q&A with RAG\")\n\nRetrieval Augmented Generation (RAG) is a way to connect LLMs to external sources of data. For a high-level tutorial on RAG, check out [this guide](/v0.2/docs/tutorials/rag/).\n\n* [How to: add chat history](/v0.2/docs/how_to/qa_chat_history_how_to/)\n* [How to: stream](/v0.2/docs/how_to/qa_streaming/)\n* [How to: return sources](/v0.2/docs/how_to/qa_sources/)\n* [How to: return citations](/v0.2/docs/how_to/qa_citations/)\n* [How to: do per-user retrieval](/v0.2/docs/how_to/qa_per_user/)\n\n### Extraction[​](#extraction \"Direct link to Extraction\")\n\nExtraction is when you use LLMs to extract structured information from unstructured text. For a high level tutorial on extraction, check out [this guide](/v0.2/docs/tutorials/extraction/).\n\n* [How to: use reference examples](/v0.2/docs/how_to/extraction_examples/)\n* [How to: handle long text](/v0.2/docs/how_to/extraction_long_text/)\n* [How to: do extraction without using function calling](/v0.2/docs/how_to/extraction_parse/)\n\n### Chatbots[​](#chatbots \"Direct link to Chatbots\")\n\nChatbots involve using an LLM to have a conversation. For a high-level tutorial on building chatbots, check out [this guide](/v0.2/docs/tutorials/chatbot/).\n\n* [How to: manage memory](/v0.2/docs/how_to/chatbots_memory/)\n* [How to: do retrieval](/v0.2/docs/how_to/chatbots_retrieval/)\n* [How to: use tools](/v0.2/docs/how_to/chatbots_tools/)\n* [How to: manage large chat history](/v0.2/docs/how_to/trim_messages/)\n\n### Query analysis[​](#query-analysis \"Direct link to Query analysis\")\n\nQuery Analysis is the task of using an LLM to generate a query to send to a retriever. 
For a high-level tutorial on query analysis, check out [this guide](/v0.2/docs/tutorials/query_analysis/).\n\n* [How to: add examples to the prompt](/v0.2/docs/how_to/query_few_shot/)\n* [How to: handle cases where no queries are generated](/v0.2/docs/how_to/query_no_queries/)\n* [How to: handle multiple queries](/v0.2/docs/how_to/query_multiple_queries/)\n* [How to: handle multiple retrievers](/v0.2/docs/how_to/query_multiple_retrievers/)\n* [How to: construct filters](/v0.2/docs/how_to/query_constructing_filters/)\n* [How to: deal with high cardinality categorical variables](/v0.2/docs/how_to/query_high_cardinality/)\n\n### Q&A over SQL + CSV[​](#qa-over-sql--csv \"Direct link to Q&A over SQL + CSV\")\n\nYou can use LLMs to do question answering over tabular data. For a high-level tutorial, check out [this guide](/v0.2/docs/tutorials/sql_qa/).\n\n* [How to: use prompting to improve results](/v0.2/docs/how_to/sql_prompting/)\n* [How to: do query validation](/v0.2/docs/how_to/sql_query_checking/)\n* [How to: deal with large databases](/v0.2/docs/how_to/sql_large_db/)\n* [How to: deal with CSV files](/v0.2/docs/how_to/sql_csv/)\n\n### Q&A over graph databases[​](#qa-over-graph-databases \"Direct link to Q&A over graph databases\")\n\nYou can use an LLM to do question answering over graph databases. For a high-level tutorial, check out [this guide](/v0.2/docs/tutorials/graph/).\n\n* [How to: map values to a database](/v0.2/docs/how_to/graph_mapping/)\n* [How to: add a semantic layer over the database](/v0.2/docs/how_to/graph_semantic/)\n* [How to: improve results with prompting](/v0.2/docs/how_to/graph_prompting/)\n* [How to: construct knowledge graphs](/v0.2/docs/how_to/graph_constructing/)\n\n[LangGraph](https://langchain-ai.github.io/langgraph)[​](#langgraph \"Direct link to langgraph\")\n-----------------------------------------------------------------------------------------------\n\nLangGraph is an extension of LangChain aimed at building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.\n\nLangGraph documentation is currently hosted on a separate site. You can peruse [LangGraph how-to guides here](https://langchain-ai.github.io/langgraph/how-tos/).\n\n[LangSmith](https://docs.smith.langchain.com/)[​](#langsmith \"Direct link to langsmith\")\n----------------------------------------------------------------------------------------\n\nLangSmith allows you to closely trace, monitor and evaluate your LLM application. It seamlessly integrates with LangChain and LangGraph, and you can use it to inspect and debug individual steps of your chains and agents as you build.\n\nLangSmith documentation is hosted on a separate site. You can peruse [LangSmith how-to guides here](https://docs.smith.langchain.com/how_to_guides/).\n\n### Evaluation[​](#evaluation \"Direct link to Evaluation\")\n\nEvaluating performance is a vital part of building LLM-powered applications. 
LangSmith helps with every step of the process, from creating a dataset to defining metrics to running evaluators.

To learn more, check out the [LangSmith evaluation how-to guides](https://docs.smith.langchain.com/how_to_guides#evaluation).

https://python.langchain.com/v0.2/docs/tutorials/extraction/

* [](/v0.2/)
* [Tutorials](/v0.2/docs/tutorials/)
* Build an Extraction Chain

On this page

Build an Extraction Chain
=========================

Prerequisites

This guide assumes familiarity with the following concepts:

* [Chat Models](/v0.2/docs/concepts/#chat-models)
* [Tools](/v0.2/docs/concepts/#tools)
* [Tool calling](/v0.2/docs/concepts/#function-tool-calling)

In this tutorial, we will build a chain to extract structured information from unstructured text.

info

This tutorial will only work with models that support **tool calling**.

Setup[​](#setup "Direct link to Setup")
---------------------------------------

### Jupyter Notebook[​](#jupyter-notebook "Direct link to Jupyter Notebook")

This guide (and most of the other guides in the documentation) uses [Jupyter notebooks](https://jupyter.org/) and assumes the reader is using them as well. Jupyter notebooks are perfect for learning how to work with LLM systems because things can often go wrong (unexpected output, an API being down, etc.), and working through guides in an interactive environment is a great way to understand them better.

This and other tutorials are perhaps most conveniently run in a Jupyter notebook. 
See [here](https://jupyter.org/install) for instructions on how to install.\n\n### Installation[​](#installation \"Direct link to Installation\")\n\nTo install LangChain run:\n\n* Pip\n* Conda\n\n pip install langchain\n\n conda install langchain -c conda-forge\n\nFor more details, see our [Installation guide](/v0.2/docs/how_to/installation/).\n\n### LangSmith[​](#langsmith \"Direct link to LangSmith\")\n\nMany of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with [LangSmith](https://smith.langchain.com).\n\nAfter you sign up at the link above, make sure to set your environment variables to start logging traces:\n\n export LANGCHAIN_TRACING_V2=\"true\"export LANGCHAIN_API_KEY=\"...\"\n\nOr, if in a notebook, you can set them with:\n\n import getpassimport osos.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()\n\nThe Schema[​](#the-schema \"Direct link to The Schema\")\n------------------------------------------------------\n\nFirst, we need to describe what information we want to extract from the text.\n\nWe'll use Pydantic to define an example schema to extract personal information.\n\n from typing import Optionalfrom langchain_core.pydantic_v1 import BaseModel, Fieldclass Person(BaseModel): \"\"\"Information about a person.\"\"\" # ^ Doc-string for the entity Person. # This doc-string is sent to the LLM as the description of the schema Person, # and it can help to improve extraction results. # Note that: # 1. Each field is an `optional` -- this allows the model to decline to extract it! # 2. Each field has a `description` -- this description is used by the LLM. # Having a good description can help improve extraction results. name: Optional[str] = Field(default=None, description=\"The name of the person\") hair_color: Optional[str] = Field( default=None, description=\"The color of the person's hair if known\" ) height_in_meters: Optional[str] = Field( default=None, description=\"Height measured in meters\" )\n\nThere are two best practices when defining schema:\n\n1. Document the **attributes** and the **schema** itself: This information is sent to the LLM and is used to improve the quality of information extraction.\n2. Do not force the LLM to make up information! Above we used `Optional` for the attributes allowing the LLM to output `None` if it doesn't know the answer.\n\ninfo\n\nFor best performance, document the schema well and make sure the model isn't force to return results if there's no information to be extracted in the text.\n\nThe Extractor[​](#the-extractor \"Direct link to The Extractor\")\n---------------------------------------------------------------\n\nLet's create an information extractor using the schema we defined above.\n\n from typing import Optionalfrom langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholderfrom langchain_core.pydantic_v1 import BaseModel, Field# Define a custom prompt to provide instructions and any additional context.# 1) You can add examples into the prompt template to improve extraction quality# 2) Introduce additional parameters to take context into account (e.g., include metadata# about the document from which the text was extracted.)prompt = ChatPromptTemplate.from_messages( [ ( \"system\", \"You are an expert extraction algorithm. 
\" \"Only extract relevant information from the text. \" \"If you do not know the value of an attribute asked to extract, \" \"return null for the attribute's value.\", ), # Please see the how-to about improving performance with # reference examples. # MessagesPlaceholder('examples'), (\"human\", \"{text}\"), ])\n\n**API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [MessagesPlaceholder](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.MessagesPlaceholder.html)\n\nWe need to use a model that supports function/tool calling.\n\nPlease review [the documentation](/v0.2/docs/concepts/#function-tool-calling) for list of some models that can be used with this API.\n\n from langchain_mistralai import ChatMistralAIllm = ChatMistralAI(model=\"mistral-large-latest\", temperature=0)runnable = prompt | llm.with_structured_output(schema=Person)\n\n**API Reference:**[ChatMistralAI](https://api.python.langchain.com/en/latest/chat_models/langchain_mistralai.chat_models.ChatMistralAI.html)\n\n /Users/harrisonchase/workplace/langchain/libs/core/langchain_core/_api/beta_decorator.py:87: LangChainBetaWarning: The method `ChatMistralAI.with_structured_output` is in beta. It is actively being worked on, so the API may change. warn_beta(\n\nLet's test it out\n\n text = \"Alan Smith is 6 feet tall and has blond hair.\"runnable.invoke({\"text\": text})\n\n Person(name='Alan Smith', hair_color='blond', height_in_meters='1.83')\n\ninfo\n\nExtraction is Generative 🤯\n\nLLMs are generative models, so they can do some pretty cool things like correctly extract the height of the person in meters even though it was provided in feet!\n\nWe can see the LangSmith trace here: [https://smith.langchain.com/public/44b69a63-3b3b-47b8-8a6d-61b46533f015/r](https://smith.langchain.com/public/44b69a63-3b3b-47b8-8a6d-61b46533f015/r)\n\nMultiple Entities[​](#multiple-entities \"Direct link to Multiple Entities\")\n---------------------------------------------------------------------------\n\nIn **most cases**, you should be extracting a list of entities rather than a single entity.\n\nThis can be easily achieved using pydantic by nesting models inside one another.\n\n from typing import List, Optionalfrom langchain_core.pydantic_v1 import BaseModel, Fieldclass Person(BaseModel): \"\"\"Information about a person.\"\"\" # ^ Doc-string for the entity Person. # This doc-string is sent to the LLM as the description of the schema Person, # and it can help to improve extraction results. # Note that: # 1. Each field is an `optional` -- this allows the model to decline to extract it! # 2. Each field has a `description` -- this description is used by the LLM. # Having a good description can help improve extraction results. name: Optional[str] = Field(default=None, description=\"The name of the person\") hair_color: Optional[str] = Field( default=None, description=\"The color of the person's hair if known\" ) height_in_meters: Optional[str] = Field( default=None, description=\"Height measured in meters\" )class Data(BaseModel): \"\"\"Extracted data about people.\"\"\" # Creates a model so that we can extract multiple entities. people: List[Person]\n\ninfo\n\nExtraction might not be perfect here. 
Please continue to see how to use **Reference Examples** to improve the quality of extraction, and see the **guidelines** section!

    runnable = prompt | llm.with_structured_output(schema=Data)
    text = "My name is Jeff, my hair is black and i am 6 feet tall. Anna has the same color hair as me."
    runnable.invoke({"text": text})

    Data(people=[Person(name='Jeff', hair_color=None, height_in_meters=None), Person(name='Anna', hair_color=None, height_in_meters=None)])

tip

When the schema accommodates the extraction of **multiple entities**, it also allows the model to extract **no entities** if no relevant information is in the text by providing an empty list.

This is usually a **good** thing! It allows specifying **required** attributes on an entity without necessarily forcing the model to detect this entity.

We can see the LangSmith trace here: [https://smith.langchain.com/public/7173764d-5e76-45fe-8496-84460bd9cdef/r](https://smith.langchain.com/public/7173764d-5e76-45fe-8496-84460bd9cdef/r)

Next steps[​](#next-steps "Direct link to Next steps")
------------------------------------------------------

Now that you understand the basics of extraction with LangChain, you're ready to proceed to the rest of the how-to guides:

* [Add Examples](/v0.2/docs/how_to/extraction_examples/): Learn how to use **reference examples** to improve performance.
* [Handle Long Text](/v0.2/docs/how_to/extraction_long_text/): What should you do if the text does not fit into the context window of the LLM?
* [Use a Parsing Approach](/v0.2/docs/how_to/extraction_parse/): Use a prompt-based approach to extract with models that do not support **tool/function calling**.

https://python.langchain.com/v0.2/docs/how_to/chatbots_memory/

* [](/v0.2/)
* [How-to guides](/v0.2/docs/how_to/)
* How to add memory to chatbots

On this page

How to add memory to chatbots
=============================

A key feature of chatbots is their ability to use the content of previous conversation turns as context. 
This state management can take several forms, including:\n\n* Simply stuffing previous messages into a chat model prompt.\n* The above, but trimming old messages to reduce the amount of distracting information the model has to deal with.\n* More complex modifications like synthesizing summaries for long running conversations.\n\nWe'll go into more detail on a few techniques below!\n\nSetup[​](#setup \"Direct link to Setup\")\n---------------------------------------\n\nYou'll need to install a few packages, and have your OpenAI API key set as an environment variable named `OPENAI_API_KEY`:\n\n %pip install --upgrade --quiet langchain langchain-openai# Set env var OPENAI_API_KEY or load from a .env file:import dotenvdotenv.load_dotenv()\n\n \u001b[33mWARNING: You are using pip version 22.0.4; however, version 23.3.2 is available.You should consider upgrading via the '/Users/jacoblee/.pyenv/versions/3.10.5/bin/python -m pip install --upgrade pip' command.\u001b[0m\u001b[33m\u001b[0mNote: you may need to restart the kernel to use updated packages.\n\n True\n\nLet's also set up a chat model that we'll use for the below examples.\n\n from langchain_openai import ChatOpenAIchat = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n\n**API Reference:**[ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)\n\nMessage passing[​](#message-passing \"Direct link to Message passing\")\n---------------------------------------------------------------------\n\nThe simplest form of memory is simply passing chat history messages into a chain. Here's an example:\n\n from langchain_core.prompts import ChatPromptTemplateprompt = ChatPromptTemplate.from_messages( [ ( \"system\", \"You are a helpful assistant. Answer all questions to the best of your ability.\", ), (\"placeholder\", \"{messages}\"), ])chain = prompt | chatai_msg = chain.invoke( { \"messages\": [ ( \"human\", \"Translate this sentence from English to French: I love programming.\", ), (\"ai\", \"J'adore la programmation.\"), (\"human\", \"What did you just say?\"), ], })print(ai_msg.content)\n\n**API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html)\n\n I said \"J'adore la programmation,\" which means \"I love programming\" in French.\n\nWe can see that by passing the previous conversation into a chain, it can use it as context to answer questions. This is the basic concept underpinning chatbot memory - the rest of the guide will demonstrate convenient techniques for passing or reformatting messages.\n\nChat history[​](#chat-history \"Direct link to Chat history\")\n------------------------------------------------------------\n\nIt's perfectly fine to store and pass messages directly as an array, but we can use LangChain's built-in [message history class](https://api.python.langchain.com/en/latest/langchain_api_reference.html#module-langchain.memory) to store and load messages as well. Instances of this class are responsible for storing and loading chat messages from persistent storage. 
LangChain integrates with many providers - you can see a [list of integrations here](/v0.2/docs/integrations/memory/) - but for this demo we will use an ephemeral demo class.\n\nHere's an example of the API:\n\n from langchain_community.chat_message_histories import ChatMessageHistorydemo_ephemeral_chat_history = ChatMessageHistory()demo_ephemeral_chat_history.add_user_message( \"Translate this sentence from English to French: I love programming.\")demo_ephemeral_chat_history.add_ai_message(\"J'adore la programmation.\")demo_ephemeral_chat_history.messages\n\n**API Reference:**[ChatMessageHistory](https://api.python.langchain.com/en/latest/chat_history/langchain_core.chat_history.ChatMessageHistory.html)\n\n [HumanMessage(content='Translate this sentence from English to French: I love programming.'), AIMessage(content=\"J'adore la programmation.\")]\n\nWe can use it directly to store conversation turns for our chain:\n\n demo_ephemeral_chat_history = ChatMessageHistory()input1 = \"Translate this sentence from English to French: I love programming.\"demo_ephemeral_chat_history.add_user_message(input1)response = chain.invoke( { \"messages\": demo_ephemeral_chat_history.messages, })demo_ephemeral_chat_history.add_ai_message(response)input2 = \"What did I just ask you?\"demo_ephemeral_chat_history.add_user_message(input2)chain.invoke( { \"messages\": demo_ephemeral_chat_history.messages, })\n\n AIMessage(content='You just asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 61, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5cbb21c2-9c30-4031-8ea8-bfc497989535-0', usage_metadata={'input_tokens': 61, 'output_tokens': 18, 'total_tokens': 79})\n\nAutomatic history management[​](#automatic-history-management \"Direct link to Automatic history management\")\n------------------------------------------------------------------------------------------------------------\n\nThe previous examples pass messages to the chain explicitly. This is a completely acceptable approach, but it does require external management of new messages. LangChain also includes an wrapper for LCEL chains that can handle this process automatically called `RunnableWithMessageHistory`.\n\nTo show how it works, let's slightly modify the above prompt to take a final `input` variable that populates a `HumanMessage` template after the chat history. This means that we will expect a `chat_history` parameter that contains all messages BEFORE the current messages instead of all messages:\n\n prompt = ChatPromptTemplate.from_messages( [ ( \"system\", \"You are a helpful assistant. 
Answer all questions to the best of your ability.\", ), (\"placeholder\", \"{chat_history}\"), (\"human\", \"{input}\"), ])chain = prompt | chat\n\nWe'll pass the latest input to the conversation here and let the `RunnableWithMessageHistory` class wrap our chain and do the work of appending that `input` variable to the chat history.\n\nNext, let's declare our wrapped chain:\n\n from langchain_core.runnables.history import RunnableWithMessageHistorydemo_ephemeral_chat_history_for_chain = ChatMessageHistory()chain_with_message_history = RunnableWithMessageHistory( chain, lambda session_id: demo_ephemeral_chat_history_for_chain, input_messages_key=\"input\", history_messages_key=\"chat_history\",)\n\n**API Reference:**[RunnableWithMessageHistory](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html)\n\nThis class takes a few parameters in addition to the chain that we want to wrap:\n\n* A factory function that returns a message history for a given session id. This allows your chain to handle multiple users at once by loading different messages for different conversations.\n* An `input_messages_key` that specifies which part of the input should be tracked and stored in the chat history. In this example, we want to track the string passed in as `input`.\n* A `history_messages_key` that specifies what the previous messages should be injected into the prompt as. Our prompt has a `MessagesPlaceholder` named `chat_history`, so we specify this property to match.\n* (For chains with multiple outputs) an `output_messages_key` which specifies which output to store as history. This is the inverse of `input_messages_key`.\n\nWe can invoke this new chain as normal, with an additional `configurable` field that specifies the particular `session_id` to pass to the factory function. This is unused for the demo, but in real-world chains, you'll want to return a chat history corresponding to the passed session:\n\n chain_with_message_history.invoke( {\"input\": \"Translate this sentence from English to French: I love programming.\"}, {\"configurable\": {\"session_id\": \"unused\"}},)\n\n Parent run dc4e2f79-4bcd-4a36-9506-55ace9040588 not found for run 34b5773e-3ced-46a6-8daf-4d464c15c940. Treating as a root run.\n\n AIMessage(content='\"J\\'adore la programmation.\"', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 39, 'total_tokens': 48}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-648b0822-b0bb-47a2-8e7d-7d34744be8f2-0', usage_metadata={'input_tokens': 39, 'output_tokens': 9, 'total_tokens': 48})\n\n chain_with_message_history.invoke( {\"input\": \"What did I just ask you?\"}, {\"configurable\": {\"session_id\": \"unused\"}})\n\n Parent run cc14b9d8-c59e-40db-a523-d6ab3fc2fa4f not found for run 5b75e25c-131e-46ee-9982-68569db04330. 
Treating as a root run.\n\n AIMessage(content='You asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 63, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5950435c-1dc2-43a6-836f-f989fd62c95e-0', usage_metadata={'input_tokens': 63, 'output_tokens': 17, 'total_tokens': 80})\n\nModifying chat history[​](#modifying-chat-history \"Direct link to Modifying chat history\")\n------------------------------------------------------------------------------------------\n\nModifying stored chat messages can help your chatbot handle a variety of situations. Here are some examples:\n\n### Trimming messages[​](#trimming-messages \"Direct link to Trimming messages\")\n\nLLMs and chat models have limited context windows, and even if you're not directly hitting limits, you may want to limit the amount of distraction the model has to deal with. One solution is trim the historic messages before passing them to the model. Let's use an example history with some preloaded messages:\n\n demo_ephemeral_chat_history = ChatMessageHistory()demo_ephemeral_chat_history.add_user_message(\"Hey there! I'm Nemo.\")demo_ephemeral_chat_history.add_ai_message(\"Hello!\")demo_ephemeral_chat_history.add_user_message(\"How are you today?\")demo_ephemeral_chat_history.add_ai_message(\"Fine thanks!\")demo_ephemeral_chat_history.messages\n\n [HumanMessage(content=\"Hey there! I'm Nemo.\"), AIMessage(content='Hello!'), HumanMessage(content='How are you today?'), AIMessage(content='Fine thanks!')]\n\nLet's use this message history with the `RunnableWithMessageHistory` chain we declared above:\n\n chain_with_message_history = RunnableWithMessageHistory( chain, lambda session_id: demo_ephemeral_chat_history, input_messages_key=\"input\", history_messages_key=\"chat_history\",)chain_with_message_history.invoke( {\"input\": \"What's my name?\"}, {\"configurable\": {\"session_id\": \"unused\"}},)\n\n Parent run 7ff2d8ec-65e2-4f67-8961-e498e2c4a591 not found for run 3881e990-6596-4326-84f6-2b76949e0657. Treating as a root run.\n\n AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72})\n\nWe can see the chain remembers the preloaded name.\n\nBut let's say we have a very small context window, and we want to trim the number of messages passed to the chain to only the 2 most recent ones. We can use the built in [trim\\_messages](/v0.2/docs/how_to/trim_messages/) util to trim messages based on their token count before they reach our prompt. 
In this case we'll count each message as 1 \"token\" and keep only the last two messages:\n\n from operator import itemgetterfrom langchain_core.messages import trim_messagesfrom langchain_core.runnables import RunnablePassthroughtrimmer = trim_messages(strategy=\"last\", max_tokens=2, token_counter=len)chain_with_trimming = ( RunnablePassthrough.assign(chat_history=itemgetter(\"chat_history\") | trimmer) | prompt | chat)chain_with_trimmed_history = RunnableWithMessageHistory( chain_with_trimming, lambda session_id: demo_ephemeral_chat_history, input_messages_key=\"input\", history_messages_key=\"chat_history\",)\n\n**API Reference:**[trim\\_messages](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.trim_messages.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html)\n\nLet's call this new chain and check the messages afterwards:\n\n chain_with_trimmed_history.invoke( {\"input\": \"Where does P. Sherman live?\"}, {\"configurable\": {\"session_id\": \"unused\"}},)\n\n Parent run 775cde65-8d22-4c44-80bb-f0b9811c32ca not found for run 5cf71d0e-4663-41cd-8dbe-e9752689cfac. Treating as a root run.\n\n AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})\n\n demo_ephemeral_chat_history.messages\n\n [HumanMessage(content=\"Hey there! I'm Nemo.\"), AIMessage(content='Hello!'), HumanMessage(content='How are you today?'), AIMessage(content='Fine thanks!'), HumanMessage(content=\"What's my name?\"), AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72}), HumanMessage(content='Where does P. Sherman live?'), AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})]\n\nAnd we can see that our history has removed the two oldest messages while still adding the most recent conversation at the end. The next time the chain is called, `trim_messages` will be called again, and only the two most recent messages will be passed to the model. In this case, this means that the model will forget the name we gave it the next time we invoke it:\n\n chain_with_trimmed_history.invoke( {\"input\": \"What is my name?\"}, {\"configurable\": {\"session_id\": \"unused\"}},)\n\n Parent run fde7123f-6fd3-421a-a3fc-2fb37dead119 not found for run 061a4563-2394-470d-a3ed-9bf1388ca431. 
Treating as a root run.\n\n AIMessage(content=\"I'm sorry, but I don't have access to your personal information, so I don't know your name. How else may I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 31, 'prompt_tokens': 74, 'total_tokens': 105}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-0ab03495-1f7c-4151-9070-56d2d1c565ff-0', usage_metadata={'input_tokens': 74, 'output_tokens': 31, 'total_tokens': 105})\n\nCheck out our [how to guide on trimming messages](/v0.2/docs/how_to/trim_messages/) for more.\n\n### Summary memory[​](#summary-memory \"Direct link to Summary memory\")\n\nWe can use this same pattern in other ways too. For example, we could use an additional LLM call to generate a summary of the conversation before calling our chain. Let's recreate our chat history and chatbot chain:\n\n demo_ephemeral_chat_history = ChatMessageHistory()demo_ephemeral_chat_history.add_user_message(\"Hey there! I'm Nemo.\")demo_ephemeral_chat_history.add_ai_message(\"Hello!\")demo_ephemeral_chat_history.add_user_message(\"How are you today?\")demo_ephemeral_chat_history.add_ai_message(\"Fine thanks!\")demo_ephemeral_chat_history.messages\n\n [HumanMessage(content=\"Hey there! I'm Nemo.\"), AIMessage(content='Hello!'), HumanMessage(content='How are you today?'), AIMessage(content='Fine thanks!')]\n\nWe'll slightly modify the prompt to make the LLM aware that will receive a condensed summary instead of a chat history:\n\n prompt = ChatPromptTemplate.from_messages( [ ( \"system\", \"You are a helpful assistant. Answer all questions to the best of your ability. The provided chat history includes facts about the user you are speaking with.\", ), (\"placeholder\", \"{chat_history}\"), (\"user\", \"{input}\"), ])chain = prompt | chatchain_with_message_history = RunnableWithMessageHistory( chain, lambda session_id: demo_ephemeral_chat_history, input_messages_key=\"input\", history_messages_key=\"chat_history\",)\n\nAnd now, let's create a function that will distill previous interactions into a summary. We can add this one to the front of the chain too:\n\n def summarize_messages(chain_input): stored_messages = demo_ephemeral_chat_history.messages if len(stored_messages) == 0: return False summarization_prompt = ChatPromptTemplate.from_messages( [ (\"placeholder\", \"{chat_history}\"), ( \"user\", \"Distill the above chat messages into a single summary message. Include as many specific details as you can.\", ), ] ) summarization_chain = summarization_prompt | chat summary_message = summarization_chain.invoke({\"chat_history\": stored_messages}) demo_ephemeral_chat_history.clear() demo_ephemeral_chat_history.add_message(summary_message) return Truechain_with_summarization = ( RunnablePassthrough.assign(messages_summarized=summarize_messages) | chain_with_message_history)\n\nLet's see if it remembers the name we gave it:\n\n chain_with_summarization.invoke( {\"input\": \"What did I say my name was?\"}, {\"configurable\": {\"session_id\": \"unused\"}},)\n\n AIMessage(content='You introduced yourself as Nemo. How can I assist you today, Nemo?')\n\n demo_ephemeral_chat_history.messages\n\n [AIMessage(content='The conversation is between Nemo and an AI. Nemo introduces himself and the AI responds with a greeting. Nemo then asks the AI how it is doing, and the AI responds that it is fine.'), HumanMessage(content='What did I say my name was?'), AIMessage(content='You introduced yourself as Nemo. 
How can I assist you today, Nemo?')]

Note that invoking the chain again will generate another summary from the previous summary plus any new messages, and so on. You could also design a hybrid approach, where a certain number of messages are retained in chat history while others are summarized.
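That hybrid approach is easy to sketch. The following is a minimal illustration, not part of the original guide, that summarizes everything except the two most recent messages before each call; it assumes the `chat`, `demo_ephemeral_chat_history`, and `chain_with_message_history` objects defined earlier on this page.

    from langchain_core.prompts import ChatPromptTemplate
    from langchain_core.runnables import RunnablePassthrough

    def summarize_all_but_last_two(chain_input):
        stored_messages = demo_ephemeral_chat_history.messages
        if len(stored_messages) <= 2:
            return False
        older, recent = stored_messages[:-2], stored_messages[-2:]
        summarization_prompt = ChatPromptTemplate.from_messages(
            [
                ("placeholder", "{chat_history}"),
                (
                    "user",
                    "Distill the above chat messages into a single summary message. "
                    "Include as many specific details as you can.",
                ),
            ]
        )
        summary_message = (summarization_prompt | chat).invoke({"chat_history": older})
        # Rebuild the history in place: one synthetic summary message followed
        # by the most recent turns, kept verbatim.
        demo_ephemeral_chat_history.clear()
        demo_ephemeral_chat_history.add_message(summary_message)
        for message in recent:
            demo_ephemeral_chat_history.add_message(message)
        return True

    hybrid_chain = (
        RunnablePassthrough.assign(messages_summarized=summarize_all_but_last_two)
        | chain_with_message_history
    )

As with the summary-memory example, the history is rewritten before the wrapped chain loads it, so each invocation sees one condensed summary plus the latest turns instead of the full transcript.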
Dataset columns: url (string, 30 to 161 characters), markdown (string, 27 to 670k characters), last_modified (1 distinct value)
https://python.langchain.com/v0.2/docs/how_to/tools_few_shot/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to use few-shot prompting with tool calling How to use few-shot prompting with tool calling =============================================== For more complex tool use it's very useful to add few-shot examples to the prompt. We can do this by adding `AIMessage`s with `ToolCall`s and corresponding `ToolMessage`s to our prompt. First let's define our tools and model. from langchain_core.tools import tool@tooldef add(a: int, b: int) -> int: """Adds a and b.""" return a + b@tooldef multiply(a: int, b: int) -> int: """Multiplies a and b.""" return a * btools = [add, multiply] **API Reference:**[tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) import osfrom getpass import getpassfrom langchain_openai import ChatOpenAIos.environ["OPENAI_API_KEY"] = getpass()llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)llm_with_tools = llm.bind_tools(tools) **API Reference:**[ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) Let's run our model where we can notice that even with some special instructions our model can get tripped up by order of operations. llm_with_tools.invoke( "Whats 119 times 8 minus 20. Don't do any math yourself, only use tools for math. Respect order of operations").tool_calls [{'name': 'Multiply', 'args': {'a': 119, 'b': 8}, 'id': 'call_T88XN6ECucTgbXXkyDeC2CQj'}, {'name': 'Add', 'args': {'a': 952, 'b': -20}, 'id': 'call_licdlmGsRqzup8rhqJSb1yZ4'}] The model shouldn't be trying to add anything yet, since it technically can't know the results of 119 \* 8 yet. By adding a prompt with some examples we can correct this behavior: from langchain_core.messages import AIMessage, HumanMessage, ToolMessagefrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.runnables import RunnablePassthroughexamples = [ HumanMessage( "What's the product of 317253 and 128472 plus four", name="example_user" ), AIMessage( "", name="example_assistant", tool_calls=[ {"name": "Multiply", "args": {"x": 317253, "y": 128472}, "id": "1"} ], ), ToolMessage("16505054784", tool_call_id="1"), AIMessage( "", name="example_assistant", tool_calls=[{"name": "Add", "args": {"x": 16505054784, "y": 4}, "id": "2"}], ), ToolMessage("16505054788", tool_call_id="2"), AIMessage( "The product of 317253 and 128472 plus four is 16505054788", name="example_assistant", ),]system = """You are bad at math but are an expert at using a calculator. 
Use past tool usage as an example of how to correctly use the tools."""few_shot_prompt = ChatPromptTemplate.from_messages( [ ("system", system), *examples, ("human", "{query}"), ])chain = {"query": RunnablePassthrough()} | few_shot_prompt | llm_with_toolschain.invoke("Whats 119 times 8 minus 20").tool_calls **API Reference:**[AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [ToolMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessage.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) [{'name': 'Multiply', 'args': {'a': 119, 'b': 8}, 'id': 'call_9MvuwQqg7dlJupJcoTWiEsDo'}] And we get the correct output this time. Here's what the [LangSmith trace](https://smith.langchain.com/public/f70550a1-585f-4c9d-a643-13148ab1616f/r) looks like. [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/tools_few_shot.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to handle tool errors ](/v0.2/docs/how_to/tools_error/)[ Next How to add a human-in-the-loop for tools ](/v0.2/docs/how_to/tools_human/)
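Constructing these example sequences by hand is easy to get wrong (each tool call `id` must match the corresponding `ToolMessage`). Below is a small helper, an illustration rather than a LangChain API, that builds the same kind of example message list from plain Python data; the `tool_call_example` name is made up, and the usage simply reproduces the example values shown on this page.

```python
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage


def tool_call_example(
    question: str,
    calls: list[tuple[str, dict, str]],
    final_answer: str,
) -> list[BaseMessage]:
    """Build a HumanMessage / AIMessage(tool_calls) / ToolMessage few-shot sequence."""
    messages: list[BaseMessage] = [HumanMessage(question, name="example_user")]
    for i, (tool_name, args, result) in enumerate(calls, start=1):
        # The AIMessage declares the tool call; the ToolMessage echoes the
        # result back under the same id.
        messages.append(
            AIMessage(
                "",
                name="example_assistant",
                tool_calls=[{"name": tool_name, "args": args, "id": str(i)}],
            )
        )
        messages.append(ToolMessage(result, tool_call_id=str(i)))
    messages.append(AIMessage(final_answer, name="example_assistant"))
    return messages


# Reproduces the example messages used above.
examples = tool_call_example(
    "What's the product of 317253 and 128472 plus four",
    [
        ("Multiply", {"x": 317253, "y": 128472}, "16505054784"),
        ("Add", {"x": 16505054784, "y": 4}, "16505054788"),
    ],
    "The product of 317253 and 128472 plus four is 16505054788",
)
```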
null
https://python.langchain.com/v0.2/docs/how_to/graph_constructing/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to construct knowledge graphs On this page How to construct knowledge graphs ================================= In this guide we'll go over the basic ways of constructing a knowledge graph based on unstructured text. The constructed graph can then be used as a knowledge base in a RAG application. ⚠️ Security note ⚠️[​](#️-security-note-️ "Direct link to ⚠️ Security note ⚠️") ------------------------------------------------------------------------------- Constructing knowledge graphs requires write access to the database. There are inherent risks in doing this. Make sure that you verify and validate data before importing it. For more on general security best practices, [see here](/v0.2/docs/security/). Architecture[​](#architecture "Direct link to Architecture") ------------------------------------------------------------ At a high level, the steps of constructing a knowledge graph from text are: 1. **Extracting structured information from text**: A model is used to extract structured graph information from text. 2. **Storing into graph database**: Storing the extracted structured graph information into a graph database enables downstream RAG applications. Setup[​](#setup "Direct link to Setup") --------------------------------------- First, get required packages and set environment variables. In this example, we will be using the Neo4j graph database. %pip install --upgrade --quiet langchain langchain-community langchain-openai langchain-experimental neo4j Note: you may need to restart the kernel to use updated packages. We default to OpenAI models in this guide. import getpassimport osos.environ["OPENAI_API_KEY"] = getpass.getpass()# Uncomment the below to use LangSmith. Not required.# os.environ["LANGCHAIN_API_KEY"] = getpass.getpass()# os.environ["LANGCHAIN_TRACING_V2"] = "true" ········ Next, we need to define Neo4j credentials and a connection. Follow [these installation steps](https://neo4j.com/docs/operations-manual/current/installation/) to set up a Neo4j database. import osfrom langchain_community.graphs import Neo4jGraphos.environ["NEO4J_URI"] = "bolt://localhost:7687"os.environ["NEO4J_USERNAME"] = "neo4j"os.environ["NEO4J_PASSWORD"] = "password"graph = Neo4jGraph() **API Reference:**[Neo4jGraph](https://api.python.langchain.com/en/latest/graphs/langchain_community.graphs.neo4j_graph.Neo4jGraph.html) LLM Graph Transformer[​](#llm-graph-transformer "Direct link to LLM Graph Transformer") --------------------------------------------------------------------------------------- Extracting graph data from text enables the transformation of unstructured information into structured formats, facilitating deeper insights and more efficient navigation through complex relationships and patterns. The `LLMGraphTransformer` converts text documents into structured graph documents by leveraging an LLM to parse and categorize entities and their relationships. The selection of the LLM significantly influences the output by determining the accuracy and nuance of the extracted graph data. 
import osfrom langchain_experimental.graph_transformers import LLMGraphTransformerfrom langchain_openai import ChatOpenAIllm = ChatOpenAI(temperature=0, model_name="gpt-4-turbo")llm_transformer = LLMGraphTransformer(llm=llm) **API Reference:**[LLMGraphTransformer](https://api.python.langchain.com/en/latest/graph_transformers/langchain_experimental.graph_transformers.llm.LLMGraphTransformer.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) Now we can pass in example text and examine the results. from langchain_core.documents import Documenttext = """Marie Curie, born in 1867, was a Polish and naturalised-French physicist and chemist who conducted pioneering research on radioactivity.She was the first woman to win a Nobel Prize, the first person to win a Nobel Prize twice, and the only person to win a Nobel Prize in two scientific fields.Her husband, Pierre Curie, was a co-winner of her first Nobel Prize, making them the first-ever married couple to win the Nobel Prize and launching the Curie family legacy of five Nobel Prizes.She was, in 1906, the first woman to become a professor at the University of Paris."""documents = [Document(page_content=text)]graph_documents = llm_transformer.convert_to_graph_documents(documents)print(f"Nodes:{graph_documents[0].nodes}")print(f"Relationships:{graph_documents[0].relationships}") **API Reference:**[Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) Nodes:[Node(id='Marie Curie', type='Person'), Node(id='Pierre Curie', type='Person'), Node(id='University Of Paris', type='Organization')]Relationships:[Relationship(source=Node(id='Marie Curie', type='Person'), target=Node(id='Pierre Curie', type='Person'), type='MARRIED'), Relationship(source=Node(id='Marie Curie', type='Person'), target=Node(id='University Of Paris', type='Organization'), type='PROFESSOR')] Examine the following image to better grasp the structure of the generated knowledge graph. ![graph_construction1.png](/v0.2/assets/images/graph_construction1-2b4d31978d58696d5a6a52ad92ae088f.png) Note that the graph construction process is non-deterministic since we are using LLM. Therefore, you might get slightly different results on each execution. Additionally, you have the flexibility to define specific types of nodes and relationships for extraction according to your requirements. llm_transformer_filtered = LLMGraphTransformer( llm=llm, allowed_nodes=["Person", "Country", "Organization"], allowed_relationships=["NATIONALITY", "LOCATED_IN", "WORKED_AT", "SPOUSE"],)graph_documents_filtered = llm_transformer_filtered.convert_to_graph_documents( documents)print(f"Nodes:{graph_documents_filtered[0].nodes}")print(f"Relationships:{graph_documents_filtered[0].relationships}") Nodes:[Node(id='Marie Curie', type='Person'), Node(id='Pierre Curie', type='Person'), Node(id='University Of Paris', type='Organization')]Relationships:[Relationship(source=Node(id='Marie Curie', type='Person'), target=Node(id='Pierre Curie', type='Person'), type='SPOUSE'), Relationship(source=Node(id='Marie Curie', type='Person'), target=Node(id='University Of Paris', type='Organization'), type='WORKED_AT')] For a better understanding of the generated graph, we can again visualize it. 
![graph_construction2.png](/v0.2/assets/images/graph_construction2-8b43506ae0fb3a006eaa4ba83fea8af5.png) The `node_properties` parameter enables the extraction of node properties, allowing the creation of a more detailed graph. When set to `True`, LLM autonomously identifies and extracts relevant node properties. Conversely, if `node_properties` is defined as a list of strings, the LLM selectively retrieves only the specified properties from the text. llm_transformer_props = LLMGraphTransformer( llm=llm, allowed_nodes=["Person", "Country", "Organization"], allowed_relationships=["NATIONALITY", "LOCATED_IN", "WORKED_AT", "SPOUSE"], node_properties=["born_year"],)graph_documents_props = llm_transformer_props.convert_to_graph_documents(documents)print(f"Nodes:{graph_documents_props[0].nodes}")print(f"Relationships:{graph_documents_props[0].relationships}") Nodes:[Node(id='Marie Curie', type='Person', properties={'born_year': '1867'}), Node(id='Pierre Curie', type='Person'), Node(id='University Of Paris', type='Organization')]Relationships:[Relationship(source=Node(id='Marie Curie', type='Person'), target=Node(id='Pierre Curie', type='Person'), type='SPOUSE'), Relationship(source=Node(id='Marie Curie', type='Person'), target=Node(id='University Of Paris', type='Organization'), type='WORKED_AT')] Storing to graph database[​](#storing-to-graph-database "Direct link to Storing to graph database") --------------------------------------------------------------------------------------------------- The generated graph documents can be stored to a graph database using the `add_graph_documents` method. graph.add_graph_documents(graph_documents_props) [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/graph_constructing.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous Build an Agent with AgentExecutor (Legacy) ](/v0.2/docs/how_to/agent_executor/)[ Next How to partially format prompt templates ](/v0.2/docs/how_to/prompts_partial/) * [⚠️ Security note ⚠️](#️-security-note-️) * [Architecture](#architecture) * [Setup](#setup) * [LLM Graph Transformer](#llm-graph-transformer) * [Storing to graph database](#storing-to-graph-database)
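To tie the pieces of this guide together, here is a condensed end-to-end sketch that extracts graph documents from raw texts and writes them to Neo4j. It only uses the classes already shown above; the `build_graph_from_texts` helper name is illustrative, and it assumes the same `OPENAI_API_KEY` and `NEO4J_*` environment variables set earlier on the page.

```python
from langchain_community.graphs import Neo4jGraph
from langchain_core.documents import Document
from langchain_experimental.graph_transformers import LLMGraphTransformer
from langchain_openai import ChatOpenAI


def build_graph_from_texts(texts: list[str], graph: Neo4jGraph) -> None:
    """Extract graph documents from raw texts and store them in the graph database."""
    llm = ChatOpenAI(temperature=0, model_name="gpt-4-turbo")
    transformer = LLMGraphTransformer(
        llm=llm,
        allowed_nodes=["Person", "Country", "Organization"],
        allowed_relationships=["NATIONALITY", "LOCATED_IN", "WORKED_AT", "SPOUSE"],
    )
    documents = [Document(page_content=text) for text in texts]
    graph_documents = transformer.convert_to_graph_documents(documents)
    graph.add_graph_documents(graph_documents)


# Usage, reusing the `text` about Marie Curie defined earlier on this page:
# build_graph_from_texts([text], Neo4jGraph())
```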
null
https://python.langchain.com/v0.2/docs/how_to/tools_error/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to handle tool errors On this page How to handle tool errors ========================= Using a model to invoke a tool has some obvious potential failure modes. Firstly, the model needs to return a output that can be parsed at all. Secondly, the model needs to return tool arguments that are valid. We can build error handling into our chains to mitigate these failure modes. Setup[​](#setup "Direct link to Setup") --------------------------------------- We'll need to install the following packages: %pip install --upgrade --quiet langchain-core langchain-openai If you'd like to trace your runs in [LangSmith](https://docs.smith.langchain.com/) uncomment and set the following environment variables: import getpassimport os# os.environ["LANGCHAIN_TRACING_V2"] = "true"# os.environ["LANGCHAIN_API_KEY"] = getpass.getpass() Chain[​](#chain "Direct link to Chain") --------------------------------------- Suppose we have the following (dummy) tool and tool-calling chain. We'll make our tool intentionally convoluted to try and trip up the model. * OpenAI * Anthropic * Azure * Google * Cohere * FireworksAI * Groq * MistralAI * TogetherAI pip install -qU langchain-openai import getpassimport osos.environ["OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI(model="gpt-3.5-turbo-0125") pip install -qU langchain-anthropic import getpassimport osos.environ["ANTHROPIC_API_KEY"] = getpass.getpass()from langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model="claude-3-sonnet-20240229") pip install -qU langchain-openai import getpassimport osos.environ["AZURE_OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import AzureChatOpenAIllm = AzureChatOpenAI( azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"], openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],) pip install -qU langchain-google-vertexai import getpassimport osos.environ["GOOGLE_API_KEY"] = getpass.getpass()from langchain_google_vertexai import ChatVertexAIllm = ChatVertexAI(model="gemini-pro") pip install -qU langchain-cohere import getpassimport osos.environ["COHERE_API_KEY"] = getpass.getpass()from langchain_cohere import ChatCoherellm = ChatCohere(model="command-r") pip install -qU langchain-fireworks import getpassimport osos.environ["FIREWORKS_API_KEY"] = getpass.getpass()from langchain_fireworks import ChatFireworksllm = ChatFireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct") pip install -qU langchain-groq import getpassimport osos.environ["GROQ_API_KEY"] = getpass.getpass()from langchain_groq import ChatGroqllm = ChatGroq(model="llama3-8b-8192") pip install -qU langchain-mistralai import getpassimport osos.environ["MISTRAL_API_KEY"] = getpass.getpass()from langchain_mistralai import ChatMistralAIllm = ChatMistralAI(model="mistral-large-latest") pip install -qU langchain-openai import getpassimport osos.environ["TOGETHER_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI( base_url="https://api.together.xyz/v1", api_key=os.environ["TOGETHER_API_KEY"], model="mistralai/Mixtral-8x7B-Instruct-v0.1",) # Define toolfrom langchain_core.tools import tool@tooldef complex_tool(int_arg: int, float_arg: float, dict_arg: dict) -> int: """Do something complex with a complex tool.""" return int_arg * float_arg **API Reference:**[tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) llm_with_tools = 
llm.bind_tools( [complex_tool],) # Define chainchain = llm_with_tools | (lambda msg: msg.tool_calls[0]["args"]) | complex_tool We can see that when we try to invoke this chain with even a fairly explicit input, the model fails to correctly call the tool (it forgets the `dict_arg` argument). chain.invoke( "use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg") ---------------------------------------------------------------------------``````outputValidationError Traceback (most recent call last)``````outputCell In[12], line 1----> 1 chain.invoke( 2 "use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg" 3 )``````outputFile ~/langchain/libs/core/langchain_core/runnables/base.py:2499, in RunnableSequence.invoke(self, input, config) 2497 try: 2498 for i, step in enumerate(self.steps):-> 2499 input = step.invoke( 2500 input, 2501 # mark each step as a child run 2502 patch_config( 2503 config, callbacks=run_manager.get_child(f"seq:step:{i+1}") 2504 ), 2505 ) 2506 # finish the root run 2507 except BaseException as e:``````outputFile ~/langchain/libs/core/langchain_core/tools.py:241, in BaseTool.invoke(self, input, config, **kwargs) 234 def invoke( 235 self, 236 input: Union[str, Dict], 237 config: Optional[RunnableConfig] = None, 238 **kwargs: Any, 239 ) -> Any: 240 config = ensure_config(config)--> 241 return self.run( 242 input, 243 callbacks=config.get("callbacks"), 244 tags=config.get("tags"), 245 metadata=config.get("metadata"), 246 run_name=config.get("run_name"), 247 run_id=config.pop("run_id", None), 248 **kwargs, 249 )``````outputFile ~/langchain/libs/core/langchain_core/tools.py:387, in BaseTool.run(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs) 385 except ValidationError as e: 386 if not self.handle_validation_error:--> 387 raise e 388 elif isinstance(self.handle_validation_error, bool): 389 observation = "Tool input validation error"``````outputFile ~/langchain/libs/core/langchain_core/tools.py:378, in BaseTool.run(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs) 364 run_manager = callback_manager.on_tool_start( 365 {"name": self.name, "description": self.description}, 366 tool_input if isinstance(tool_input, str) else str(tool_input), (...) 
375 **kwargs, 376 ) 377 try:--> 378 parsed_input = self._parse_input(tool_input) 379 tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input) 380 observation = ( 381 self._run(*tool_args, run_manager=run_manager, **tool_kwargs) 382 if new_arg_supported 383 else self._run(*tool_args, **tool_kwargs) 384 )``````outputFile ~/langchain/libs/core/langchain_core/tools.py:283, in BaseTool._parse_input(self, tool_input) 281 else: 282 if input_args is not None:--> 283 result = input_args.parse_obj(tool_input) 284 return { 285 k: getattr(result, k) 286 for k, v in result.dict().items() 287 if k in tool_input 288 } 289 return tool_input``````outputFile ~/langchain/.venv/lib/python3.9/site-packages/pydantic/v1/main.py:526, in BaseModel.parse_obj(cls, obj) 524 exc = TypeError(f'{cls.__name__} expected dict not {obj.__class__.__name__}') 525 raise ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], cls) from e--> 526 return cls(**obj)``````outputFile ~/langchain/.venv/lib/python3.9/site-packages/pydantic/v1/main.py:341, in BaseModel.__init__(__pydantic_self__, **data) 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) 340 if validation_error:--> 341 raise validation_error 342 try: 343 object_setattr(__pydantic_self__, '__dict__', values)``````outputValidationError: 1 validation error for complex_toolSchemadict_arg field required (type=value_error.missing) Try/except tool call[​](#tryexcept-tool-call "Direct link to Try/except tool call") ----------------------------------------------------------------------------------- The simplest way to more gracefully handle errors is to try/except the tool-calling step and return a helpful message on errors: from typing import Anyfrom langchain_core.runnables import Runnable, RunnableConfigdef try_except_tool(tool_args: dict, config: RunnableConfig) -> Runnable: try: complex_tool.invoke(tool_args, config=config) except Exception as e: return f"Calling tool with arguments:\n\n{tool_args}\n\nraised the following error:\n\n{type(e)}: {e}"chain = llm_with_tools | (lambda msg: msg.tool_calls[0]["args"]) | try_except_tool **API Reference:**[Runnable](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) | [RunnableConfig](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.config.RunnableConfig.html) print( chain.invoke( "use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg" )) Calling tool with arguments:{'int_arg': 5, 'float_arg': 2.1}raised the following error:<class 'pydantic.v1.error_wrappers.ValidationError'>: 1 validation error for complex_toolSchemadict_arg field required (type=value_error.missing) Fallbacks[​](#fallbacks "Direct link to Fallbacks") --------------------------------------------------- We can also try to fallback to a better model in the event of a tool invocation error. In this case we'll fall back to an identical chain that uses `gpt-4-1106-preview` instead of `gpt-3.5-turbo`. chain = llm_with_tools | (lambda msg: msg.tool_calls[0]["args"]) | complex_toolbetter_model = ChatOpenAI(model="gpt-4-1106-preview", temperature=0).bind_tools( [complex_tool], tool_choice="complex_tool")better_chain = better_model | (lambda msg: msg.tool_calls[0]["args"]) | complex_toolchain_with_fallback = chain.with_fallbacks([better_chain])chain_with_fallback.invoke( "use complex tool. the args are 5, 2.1, empty dictionary. 
don't forget dict_arg") 10.5 Looking at the [Langsmith trace](https://smith.langchain.com/public/00e91fc2-e1a4-4b0f-a82e-e6b3119d196c/r) for this chain run, we can see that the first chain call fails as expected and it's the fallback that succeeds. Retry with exception[​](#retry-with-exception "Direct link to Retry with exception") ------------------------------------------------------------------------------------ To take things one step further, we can try to automatically re-run the chain with the exception passed in, so that the model may be able to correct its behavior: import jsonfrom typing import Anyfrom langchain_core.messages import AIMessage, HumanMessage, ToolCall, ToolMessagefrom langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholderfrom langchain_core.runnables import RunnablePassthroughclass CustomToolException(Exception): """Custom LangChain tool exception.""" def __init__(self, tool_call: ToolCall, exception: Exception) -> None: super().__init__() self.tool_call = tool_call self.exception = exceptiondef tool_custom_exception(msg: AIMessage, config: RunnableConfig) -> Runnable: try: return complex_tool.invoke(msg.tool_calls[0]["args"], config=config) except Exception as e: raise CustomToolException(msg.tool_calls[0], e)def exception_to_messages(inputs: dict) -> dict: exception = inputs.pop("exception") # Add historical messages to the original input, so the model knows that it made a mistake with the last tool call. messages = [ AIMessage(content="", tool_calls=[exception.tool_call]), ToolMessage( tool_call_id=exception.tool_call["id"], content=str(exception.exception) ), HumanMessage( content="The last tool call raised an exception. Try calling the tool again with corrected arguments. Do not repeat mistakes." ), ] inputs["last_output"] = messages return inputs# We add a last_output MessagesPlaceholder to our prompt which if not passed in doesn't# affect the prompt at all, but gives us the option to insert an arbitrary list of Messages# into the prompt if needed. We'll use this on retries to insert the error message.prompt = ChatPromptTemplate.from_messages( [("human", "{input}"), MessagesPlaceholder("last_output", optional=True)])chain = prompt | llm_with_tools | tool_custom_exception# If the initial chain call fails, we rerun it withe the exception passed in as a message.self_correcting_chain = chain.with_fallbacks( [exception_to_messages | chain], exception_key="exception") **API Reference:**[AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [ToolCall](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolCall.html) | [ToolMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessage.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [MessagesPlaceholder](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.MessagesPlaceholder.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) self_correcting_chain.invoke( { "input": "use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg" }) 10.5 And our chain succeeds! 
Looking at the [LangSmith trace](https://smith.langchain.com/public/c11e804c-e14f-4059-bd09-64766f999c14/r), we can see that indeed our initial chain still fails, and it's only on retrying that the chain succeeds. [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/tools_error.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to convert tools to OpenAI Functions ](/v0.2/docs/how_to/tools_as_openai_functions/)[ Next How to use few-shot prompting with tool calling ](/v0.2/docs/how_to/tools_few_shot/) * [Setup](#setup) * [Chain](#chain) * [Try/except tool call](#tryexcept-tool-call) * [Fallbacks](#fallbacks) * [Retry with exception](#retry-with-exception)
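A small variation on the self-correcting chain above (my own sketch, not from the guide): passing several copies of the correcting fallback gives the model more than one chance to fix its arguments. This assumes that `with_fallbacks` with `exception_key` set forwards the most recent exception to each successive fallback, and it reuses the `chain` and `exception_to_messages` defined above.

```python
# Allow up to `max_retries` correction rounds by stacking identical fallbacks,
# each of which re-injects the previous exception as messages via
# `exception_to_messages` (assumption: each fallback receives the most recent
# exception under the "exception" key).
max_retries = 3
self_correcting_chain_with_retries = chain.with_fallbacks(
    [exception_to_messages | chain] * max_retries,
    exception_key="exception",
)

self_correcting_chain_with_retries.invoke(
    {
        "input": "use complex tool. the args are 5, 2.1, empty dictionary. "
        "don't forget dict_arg"
    }
)
```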
null
https://python.langchain.com/v0.2/docs/how_to/agent_executor/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * Build an Agent with AgentExecutor (Legacy) On this page Build an Agent with AgentExecutor (Legacy) ========================================== info This section will cover building with the legacy LangChain AgentExecutor. These are fine for getting started, but past a certain point, you will likely want flexibility and control that they do not offer. For working with more advanced agents, we'd recommend checking out [LangGraph Agents](/v0.2/docs/concepts/#langgraph) or the [migration guide](/v0.2/docs/how_to/migrate_agent/) By themselves, language models can't take actions - they just output text. A big use case for LangChain is creating **agents**. Agents are systems that use an LLM as a reasoning engine to determine which actions to take and what the inputs to those actions should be. The results of those actions can then be fed back into the agent and it determines whether more actions are needed, or whether it is okay to finish. In this tutorial, we will build an agent that can interact with multiple different tools: one being a local database, the other being a search engine. You will be able to ask this agent questions, watch it call tools, and have conversations with it. Concepts[​](#concepts "Direct link to Concepts") ------------------------------------------------ Concepts we will cover are: * Using [language models](/v0.2/docs/concepts/#chat-models), in particular their tool calling ability * Creating a [Retriever](/v0.2/docs/concepts/#retrievers) to expose specific information to our agent * Using a Search [Tool](/v0.2/docs/concepts/#tools) to look up things online * [`Chat History`](/v0.2/docs/concepts/#chat-history), which allows a chatbot to "remember" past interactions and take them into account when responding to follow-up questions. * Debugging and tracing your application using [LangSmith](/v0.2/docs/concepts/#langsmith) Setup[​](#setup "Direct link to Setup") --------------------------------------- ### Jupyter Notebook[​](#jupyter-notebook "Direct link to Jupyter Notebook") This guide (and most of the other guides in the documentation) uses [Jupyter notebooks](https://jupyter.org/) and assumes the reader is as well. Jupyter notebooks are perfect for learning how to work with LLM systems because oftentimes things can go wrong (unexpected output, API down, etc) and going through guides in an interactive environment is a great way to better understand them. This and other tutorials are perhaps most conveniently run in a Jupyter notebook. See [here](https://jupyter.org/install) for instructions on how to install. ### Installation[​](#installation "Direct link to Installation") To install LangChain run: * Pip * Conda pip install langchain conda install langchain -c conda-forge For more details, see our [Installation guide](/v0.2/docs/how_to/installation/). ### LangSmith[​](#langsmith "Direct link to LangSmith") Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with [LangSmith](https://smith.langchain.com). After you sign up at the link above, make sure to set your environment variables to start logging traces: export LANGCHAIN_TRACING_V2="true"export LANGCHAIN_API_KEY="..." 
Or, if in a notebook, you can set them with: import getpassimport osos.environ["LANGCHAIN_TRACING_V2"] = "true"os.environ["LANGCHAIN_API_KEY"] = getpass.getpass() Define tools[​](#define-tools "Direct link to Define tools") ------------------------------------------------------------ We first need to create the tools we want to use. We will use two tools: [Tavily](/v0.2/docs/integrations/tools/tavily_search/) (to search online) and then a retriever over a local index we will create ### [Tavily](/v0.2/docs/integrations/tools/tavily_search/)[​](#tavily "Direct link to tavily") We have a built-in tool in LangChain to easily use Tavily search engine as tool. Note that this requires an API key - they have a free tier, but if you don't have one or don't want to create one, you can always ignore this step. Once you create your API key, you will need to export that as: export TAVILY_API_KEY="..." from langchain_community.tools.tavily_search import TavilySearchResults **API Reference:**[TavilySearchResults](https://api.python.langchain.com/en/latest/tools/langchain_community.tools.tavily_search.tool.TavilySearchResults.html) search = TavilySearchResults(max_results=2) search.invoke("what is the weather in SF") [{'url': 'https://www.weatherapi.com/', 'content': "{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.78, 'lon': -122.42, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1714000492, 'localtime': '2024-04-24 16:14'}, 'current': {'last_updated_epoch': 1713999600, 'last_updated': '2024-04-24 16:00', 'temp_c': 15.6, 'temp_f': 60.1, 'is_day': 1, 'condition': {'text': 'Overcast', 'icon': '//cdn.weatherapi.com/weather/64x64/day/122.png', 'code': 1009}, 'wind_mph': 10.5, 'wind_kph': 16.9, 'wind_degree': 330, 'wind_dir': 'NNW', 'pressure_mb': 1018.0, 'pressure_in': 30.06, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 72, 'cloud': 100, 'feelslike_c': 15.6, 'feelslike_f': 60.1, 'vis_km': 16.0, 'vis_miles': 9.0, 'uv': 5.0, 'gust_mph': 14.8, 'gust_kph': 23.8}}"}, {'url': 'https://www.weathertab.com/en/c/e/04/united-states/california/san-francisco/', 'content': 'San Francisco Weather Forecast for Apr 2024 - Risk of Rain Graph. Rain Risk Graph: Monthly Overview. Bar heights indicate rain risk percentages. Yellow bars mark low-risk days, while black and grey bars signal higher risks. Grey-yellow bars act as buffers, advising to keep at least one day clear from the riskier grey and black days, guiding ...'}] ### Retriever[​](#retriever "Direct link to Retriever") We will also create a retriever over some data of our own. For a deeper explanation of each step here, see [this tutorial](/v0.2/docs/tutorials/rag/). 
from langchain_community.document_loaders import WebBaseLoaderfrom langchain_community.vectorstores import FAISSfrom langchain_openai import OpenAIEmbeddingsfrom langchain_text_splitters import RecursiveCharacterTextSplitterloader = WebBaseLoader("https://docs.smith.langchain.com/overview")docs = loader.load()documents = RecursiveCharacterTextSplitter( chunk_size=1000, chunk_overlap=200).split_documents(docs)vector = FAISS.from_documents(documents, OpenAIEmbeddings())retriever = vector.as_retriever() **API Reference:**[WebBaseLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.web_base.WebBaseLoader.html) | [FAISS](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) | [RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html) retriever.invoke("how to upload a dataset")[0] Document(page_content='# The data to predict and grade over evaluators=[exact_match], # The evaluators to score the results experiment_prefix="sample-experiment", # The name of the experiment metadata={ "version": "1.0.0", "revision_id": "beta" },)import { Client, Run, Example } from \'langsmith\';import { runOnDataset } from \'langchain/smith\';import { EvaluationResult } from \'langsmith/evaluation\';const client = new Client();// Define dataset: these are your test casesconst datasetName = "Sample Dataset";const dataset = await client.createDataset(datasetName, { description: "A sample dataset in LangSmith."});await client.createExamples({ inputs: [ { postfix: "to LangSmith" }, { postfix: "to Evaluations in LangSmith" }, ], outputs: [ { output: "Welcome to LangSmith" }, { output: "Welcome to Evaluations in LangSmith" }, ], datasetId: dataset.id,});// Define your evaluatorconst exactMatch = async ({ run, example }: { run: Run; example?:', metadata={'source': 'https://docs.smith.langchain.com/overview', 'title': 'Getting started with LangSmith | 🦜️🛠️ LangSmith', 'description': 'Introduction', 'language': 'en'}) Now that we have populated the index that we will be doing retrieval over, we can easily turn it into a tool (the format needed for an agent to properly use it). from langchain.tools.retriever import create_retriever_tool **API Reference:**[create\_retriever\_tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.create_retriever_tool.html) retriever_tool = create_retriever_tool( retriever, "langsmith_search", "Search for information about LangSmith. For any questions about LangSmith, you must use this tool!",) ### Tools[​](#tools "Direct link to Tools") Now that we have created both, we can create a list of tools that we will use downstream. tools = [search, retriever_tool] Using Language Models[​](#using-language-models "Direct link to Using Language Models") --------------------------------------------------------------------------------------- Next, let's learn how to use a language model to call tools. LangChain supports many different language models that you can use interchangeably - select the one you want to use below! 
* OpenAI * Anthropic * Azure * Google * Cohere * FireworksAI * Groq * MistralAI * TogetherAI pip install -qU langchain-openai import getpassimport osos.environ["OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAImodel = ChatOpenAI(model="gpt-4") pip install -qU langchain-anthropic import getpassimport osos.environ["ANTHROPIC_API_KEY"] = getpass.getpass()from langchain_anthropic import ChatAnthropicmodel = ChatAnthropic(model="claude-3-sonnet-20240229") pip install -qU langchain-openai import getpassimport osos.environ["AZURE_OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import AzureChatOpenAImodel = AzureChatOpenAI( azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"], openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],) pip install -qU langchain-google-vertexai import getpassimport osos.environ["GOOGLE_API_KEY"] = getpass.getpass()from langchain_google_vertexai import ChatVertexAImodel = ChatVertexAI(model="gemini-pro") pip install -qU langchain-cohere import getpassimport osos.environ["COHERE_API_KEY"] = getpass.getpass()from langchain_cohere import ChatCoheremodel = ChatCohere(model="command-r") pip install -qU langchain-fireworks import getpassimport osos.environ["FIREWORKS_API_KEY"] = getpass.getpass()from langchain_fireworks import ChatFireworksmodel = ChatFireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct") pip install -qU langchain-groq import getpassimport osos.environ["GROQ_API_KEY"] = getpass.getpass()from langchain_groq import ChatGroqmodel = ChatGroq(model="llama3-8b-8192") pip install -qU langchain-mistralai import getpassimport osos.environ["MISTRAL_API_KEY"] = getpass.getpass()from langchain_mistralai import ChatMistralAImodel = ChatMistralAI(model="mistral-large-latest") pip install -qU langchain-openai import getpassimport osos.environ["TOGETHER_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAImodel = ChatOpenAI( base_url="https://api.together.xyz/v1", api_key=os.environ["TOGETHER_API_KEY"], model="mistralai/Mixtral-8x7B-Instruct-v0.1",) You can call the language model by passing in a list of messages. By default, the response is a `content` string. from langchain_core.messages import HumanMessageresponse = model.invoke([HumanMessage(content="hi!")])response.content **API Reference:**[HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) 'Hello! How can I assist you today?' We can now see what it is like to enable this model to do tool calling. In order to enable that we use `.bind_tools` to give the language model knowledge of these tools model_with_tools = model.bind_tools(tools) We can now call the model. Let's first call it with a normal message, and see how it responds. We can look at both the `content` field as well as the `tool_calls` field. response = model_with_tools.invoke([HumanMessage(content="Hi!")])print(f"ContentString: {response.content}")print(f"ToolCalls: {response.tool_calls}") ContentString: Hello! How can I assist you today?ToolCalls: [] Now, let's try calling it with some input that would expect a tool to be called. 
response = model_with_tools.invoke([HumanMessage(content="What's the weather in SF?")])print(f"ContentString: {response.content}")print(f"ToolCalls: {response.tool_calls}") ContentString: ToolCalls: [{'name': 'tavily_search_results_json', 'args': {'query': 'current weather in San Francisco'}, 'id': 'call_4HteVahXkRAkWjp6dGXryKZX'}] We can see that there's now no content, but there is a tool call! It wants us to call the Tavily Search tool. This isn't calling that tool yet - it's just telling us to. In order to actually call it, we'll want to create our agent. Create the agent[​](#create-the-agent "Direct link to Create the agent") ------------------------------------------------------------------------ Now that we have defined the tools and the LLM, we can create the agent. We will be using a tool calling agent - for more information on this type of agent, as well as other options, see [this guide](/v0.2/docs/concepts/#agent_types/). We can first choose the prompt we want to use to guide the agent. If you want to see the contents of this prompt and have access to LangSmith, you can go to: [https://smith.langchain.com/hub/hwchase17/openai-functions-agent](https://smith.langchain.com/hub/hwchase17/openai-functions-agent) from langchain import hub# Get the prompt to use - you can modify this!prompt = hub.pull("hwchase17/openai-functions-agent")prompt.messages [SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=[], template='You are a helpful assistant')), MessagesPlaceholder(variable_name='chat_history', optional=True), HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['input'], template='{input}')), MessagesPlaceholder(variable_name='agent_scratchpad')] Now, we can initialize the agent with the LLM, the prompt, and the tools. The agent is responsible for taking in input and deciding what actions to take. Crucially, the Agent does not execute those actions - that is done by the AgentExecutor (next step). For more information about how to think about these components, see our [conceptual guide](/v0.2/docs/concepts/#agents). Note that we are passing in the `model`, not `model_with_tools`. That is because `create_tool_calling_agent` will call `.bind_tools` for us under the hood. from langchain.agents import create_tool_calling_agentagent = create_tool_calling_agent(model, tools, prompt) **API Reference:**[create\_tool\_calling\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.tool_calling_agent.base.create_tool_calling_agent.html) Finally, we combine the agent (the brains) with the tools inside the AgentExecutor (which will repeatedly call the agent and execute tools). from langchain.agents import AgentExecutoragent_executor = AgentExecutor(agent=agent, tools=tools) **API Reference:**[AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html) Run the agent[​](#run-the-agent "Direct link to Run the agent") --------------------------------------------------------------- We can now run the agent on a few queries! Note that for now, these are all **stateless** queries (it won't remember previous interactions). First up, let's see how it responds when there's no need to call a tool: agent_executor.invoke({"input": "hi!"}) {'input': 'hi!', 'output': 'Hello! 
How can I assist you today?'} In order to see exactly what is happening under the hood (and to make sure it's not calling a tool) we can take a look at the [LangSmith trace](https://smith.langchain.com/public/8441812b-94ce-4832-93ec-e1114214553a/r) Let's now try it out on an example where it should be invoking the retriever agent_executor.invoke({"input": "how can langsmith help with testing?"}) {'input': 'how can langsmith help with testing?', 'output': 'LangSmith is a platform that aids in building production-grade Language Learning Model (LLM) applications. It can assist with testing in several ways:\n\n1. **Monitoring and Evaluation**: LangSmith allows close monitoring and evaluation of your application. This helps you to ensure the quality of your application and deploy it with confidence.\n\n2. **Tracing**: LangSmith has tracing capabilities that can be beneficial for debugging and understanding the behavior of your application.\n\n3. **Evaluation Capabilities**: LangSmith has built-in tools for evaluating the performance of your LLM. \n\n4. **Prompt Hub**: This is a prompt management tool built into LangSmith that can help in testing different prompts and their responses.\n\nPlease note that to use LangSmith, you would need to install it and create an API key. The platform offers Python and Typescript SDKs for utilization. It works independently and does not require the use of LangChain.'} Let's take a look at the [LangSmith trace](https://smith.langchain.com/public/762153f6-14d4-4c98-8659-82650f860c62/r) to make sure it's actually calling that. Now let's try one where it needs to call the search tool: agent_executor.invoke({"input": "whats the weather in sf?"}) {'input': 'whats the weather in sf?', 'output': 'The current weather in San Francisco is partly cloudy with a temperature of 16.1°C (61.0°F). The wind is coming from the WNW at a speed of 10.5 mph. The humidity is at 67%. [source](https://www.weatherapi.com/)'} We can check out the [LangSmith trace](https://smith.langchain.com/public/36df5b1a-9a0b-4185-bae2-964e1d53c665/r) to make sure it's calling the search tool effectively. Adding in memory[​](#adding-in-memory "Direct link to Adding in memory") ------------------------------------------------------------------------ As mentioned earlier, this agent is stateless. This means it does not remember previous interactions. To give it memory we need to pass in previous `chat_history`. Note: it needs to be called `chat_history` because of the prompt we are using. If we use a different prompt, we could change the variable name # Here we pass in an empty list of messages for chat_history because it is the first message in the chatagent_executor.invoke({"input": "hi! my name is bob", "chat_history": []}) {'input': 'hi! my name is bob', 'chat_history': [], 'output': 'Hello Bob! How can I assist you today?'} from langchain_core.messages import AIMessage, HumanMessage **API Reference:**[AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) agent_executor.invoke( { "chat_history": [ HumanMessage(content="hi! my name is bob"), AIMessage(content="Hello Bob! How can I assist you today?"), ], "input": "what's my name?", }) {'chat_history': [HumanMessage(content='hi! my name is bob'), AIMessage(content='Hello Bob! How can I assist you today?')], 'input': "what's my name?", 'output': 'Your name is Bob. 
How can I assist you further?'} If we want to keep track of these messages automatically, we can wrap this in a RunnableWithMessageHistory. For more information on how to use this, see [this guide](/v0.2/docs/how_to/message_history/). from langchain_community.chat_message_histories import ChatMessageHistoryfrom langchain_core.chat_history import BaseChatMessageHistoryfrom langchain_core.runnables.history import RunnableWithMessageHistorystore = {}def get_session_history(session_id: str) -> BaseChatMessageHistory: if session_id not in store: store[session_id] = ChatMessageHistory() return store[session_id] **API Reference:**[ChatMessageHistory](https://api.python.langchain.com/en/latest/chat_history/langchain_core.chat_history.ChatMessageHistory.html) | [BaseChatMessageHistory](https://api.python.langchain.com/en/latest/chat_history/langchain_core.chat_history.BaseChatMessageHistory.html) | [RunnableWithMessageHistory](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) Because we have multiple inputs, we need to specify two things: * `input_messages_key`: The input key to use to add to the conversation history. * `history_messages_key`: The key to add the loaded messages into. agent_with_chat_history = RunnableWithMessageHistory( agent_executor, get_session_history, input_messages_key="input", history_messages_key="chat_history",) agent_with_chat_history.invoke( {"input": "hi! I'm bob"}, config={"configurable": {"session_id": "<foo>"}},) {'input': "hi! I'm bob", 'chat_history': [], 'output': 'Hello Bob! How can I assist you today?'} agent_with_chat_history.invoke( {"input": "what's my name?"}, config={"configurable": {"session_id": "<foo>"}},) {'input': "what's my name?", 'chat_history': [HumanMessage(content="hi! I'm bob"), AIMessage(content='Hello Bob! How can I assist you today?')], 'output': 'Your name is Bob.'} Example LangSmith trace: [https://smith.langchain.com/public/98c8d162-60ae-4493-aa9f-992d87bd0429/r](https://smith.langchain.com/public/98c8d162-60ae-4493-aa9f-992d87bd0429/r) Conclusion[​](#conclusion "Direct link to Conclusion") ------------------------------------------------------ That's a wrap! In this quick start we covered how to create a simple agent. Agents are a complex topic, and there's a lot to learn! info This section covered building with LangChain Agents. LangChain Agents are fine for getting started, but past a certain point you will likely want flexibility and control that they do not offer. For working with more advanced agents, we'd recommend checking out [LangGraph](/v0.2/docs/concepts/#langgraph). If you want to continue using LangChain agents, some good advanced guides are: * [How to use LangGraph's built-in versions of `AgentExecutor`](/v0.2/docs/how_to/migrate_agent/) * [How to create a custom agent](https://python.langchain.com/v0.1/docs/modules/agents/how_to/custom_agent/) * [How to stream responses from an agent](https://python.langchain.com/v0.1/docs/modules/agents/how_to/streaming/) * [How to return structured output from an agent](https://python.langchain.com/v0.1/docs/modules/agents/how_to/agent_structured/) [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/agent_executor.ipynb) * * * #### Was this page helpful? 
#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to add ad-hoc tool calling capability to LLMs and Chat Models ](/v0.2/docs/how_to/tools_prompting/)[ Next How to construct knowledge graphs ](/v0.2/docs/how_to/graph_constructing/) * [Concepts](#concepts) * [Setup](#setup) * [Jupyter Notebook](#jupyter-notebook) * [Installation](#installation) * [LangSmith](#langsmith) * [Define tools](#define-tools) * [Tavily](#tavily) * [Retriever](#retriever) * [Tools](#tools) * [Using Language Models](#using-language-models) * [Create the agent](#create-the-agent) * [Run the agent](#run-the-agent) * [Adding in memory](#adding-in-memory) * [Conclusion](#conclusion)
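For reference, here is a condensed sketch of the whole flow on this page in one place, trimmed down to just the search tool and an in-memory chat history. It only uses imports that appear above; the "memory_demo" session id is arbitrary, and it assumes `OPENAI_API_KEY` and `TAVILY_API_KEY` are already set.

```python
from langchain import hub
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-4")
tools = [TavilySearchResults(max_results=2)]
prompt = hub.pull("hwchase17/openai-functions-agent")
agent_executor = AgentExecutor(
    agent=create_tool_calling_agent(model, tools, prompt), tools=tools
)

store: dict[str, BaseChatMessageHistory] = {}


def get_session_history(session_id: str) -> BaseChatMessageHistory:
    """Return (creating if needed) the chat history for a session."""
    if session_id not in store:
        store[session_id] = ChatMessageHistory()
    return store[session_id]


agent_with_chat_history = RunnableWithMessageHistory(
    agent_executor,
    get_session_history,
    input_messages_key="input",
    history_messages_key="chat_history",
)

agent_with_chat_history.invoke(
    {"input": "whats the weather in sf?"},
    config={"configurable": {"session_id": "memory_demo"}},
)
```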
null
https://python.langchain.com/v0.2/docs/how_to/query_multiple_queries/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to handle multiple queries when doing query analysis On this page How to handle multiple queries when doing query analysis ======================================================== Sometimes, a query analysis technique may allow for multiple queries to be generated. In these cases, we need to remember to run all queries and then to combine the results. We will show a simple example (using mock data) of how to do that. Setup[​](#setup "Direct link to Setup") --------------------------------------- #### Install dependencies[​](#install-dependencies "Direct link to Install dependencies") # %pip install -qU langchain langchain-community langchain-openai langchain-chroma #### Set environment variables[​](#set-environment-variables "Direct link to Set environment variables") We'll use OpenAI in this example: import getpassimport osos.environ["OPENAI_API_KEY"] = getpass.getpass()# Optional, uncomment to trace runs with LangSmith. Sign up here: https://smith.langchain.com.# os.environ["LANGCHAIN_TRACING_V2"] = "true"# os.environ["LANGCHAIN_API_KEY"] = getpass.getpass() ### Create Index[​](#create-index "Direct link to Create Index") We will create a vectorstore over fake information. from langchain_chroma import Chromafrom langchain_openai import OpenAIEmbeddingsfrom langchain_text_splitters import RecursiveCharacterTextSplittertexts = ["Harrison worked at Kensho", "Ankush worked at Facebook"]embeddings = OpenAIEmbeddings(model="text-embedding-3-small")vectorstore = Chroma.from_texts( texts, embeddings,)retriever = vectorstore.as_retriever(search_kwargs={"k": 1}) **API Reference:**[OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) | [RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html) Query analysis[​](#query-analysis "Direct link to Query analysis") ------------------------------------------------------------------ We will use function calling to structure the output. We will let it return multiple queries. 
from typing import List, Optionalfrom langchain_core.pydantic_v1 import BaseModel, Fieldclass Search(BaseModel): """Search over a database of job records.""" queries: List[str] = Field( ..., description="Distinct queries to search for", ) from langchain_core.output_parsers.openai_tools import PydanticToolsParserfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.runnables import RunnablePassthroughfrom langchain_openai import ChatOpenAIoutput_parser = PydanticToolsParser(tools=[Search])system = """You have the ability to issue search queries to get information to help answer user information.If you need to look up two distinct pieces of information, you are allowed to do that!"""prompt = ChatPromptTemplate.from_messages( [ ("system", system), ("human", "{question}"), ])llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)structured_llm = llm.with_structured_output(Search)query_analyzer = {"question": RunnablePassthrough()} | prompt | structured_llm **API Reference:**[PydanticToolsParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.openai_tools.PydanticToolsParser.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) /Users/harrisonchase/workplace/langchain/libs/core/langchain_core/_api/beta_decorator.py:86: LangChainBetaWarning: The function `with_structured_output` is in beta. It is actively being worked on, so the API may change. warn_beta( We can see that this allows for creating multiple queries query_analyzer.invoke("where did Harrison Work") Search(queries=['Harrison work location']) query_analyzer.invoke("where did Harrison and ankush Work") Search(queries=['Harrison work place', 'Ankush work place']) Retrieval with query analysis[​](#retrieval-with-query-analysis "Direct link to Retrieval with query analysis") --------------------------------------------------------------------------------------------------------------- So how would we include this in a chain? One thing that will make this a lot easier is if we call our retriever asyncronously - this will let us loop over the queries and not get blocked on the response time. from langchain_core.runnables import chain **API Reference:**[chain](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.chain.html) @chainasync def custom_chain(question): response = await query_analyzer.ainvoke(question) docs = [] for query in response.queries: new_docs = await retriever.ainvoke(query) docs.extend(new_docs) # You probably want to think about reranking or deduplicating documents here # But that is a separate topic return docs await custom_chain.ainvoke("where did Harrison Work") [Document(page_content='Harrison worked at Kensho')] await custom_chain.ainvoke("where did Harrison and ankush Work") [Document(page_content='Harrison worked at Kensho'), Document(page_content='Ankush worked at Facebook')] [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/query_multiple_queries.ipynb) * * * #### Was this page helpful? 
#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to partially format prompt templates ](/v0.2/docs/how_to/prompts_partial/)[ Next How to use built-in tools and toolkits ](/v0.2/docs/how_to/tools_builtin/) * [Setup](#setup) * [Create Index](#create-index) * [Query analysis](#query-analysis) * [Retrieval with query analysis](#retrieval-with-query-analysis)
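As a follow-up to the reranking/deduplication comment in `custom_chain` above, here is a sketch (my own, not from the guide) that runs the per-query retrievals concurrently with `asyncio.gather` and drops duplicate documents by page content. It reuses the `query_analyzer` and `retriever` defined earlier on this page; `custom_chain_dedup` is an illustrative name.

```python
import asyncio

from langchain_core.documents import Document
from langchain_core.runnables import chain


@chain
async def custom_chain_dedup(question: str) -> list[Document]:
    """Run all generated queries concurrently and deduplicate the retrieved docs."""
    response = await query_analyzer.ainvoke(question)
    # Fire off one retrieval per generated query and wait for all of them.
    results = await asyncio.gather(
        *(retriever.ainvoke(query) for query in response.queries)
    )
    seen: set[str] = set()
    docs: list[Document] = []
    for doc in (d for result in results for d in result):
        if doc.page_content not in seen:
            seen.add(doc.page_content)
            docs.append(doc)
    return docs


# await custom_chain_dedup.ainvoke("where did Harrison and ankush Work")
```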
https://python.langchain.com/v0.2/docs/how_to/tools_model_specific/
How to bind model-specific tools
================================

Providers adopt different conventions for formatting tool schemas. For instance, OpenAI uses a format like this:

* `type`: The type of the tool. At the time of writing, this is always `"function"`.
* `function`: An object containing tool parameters.
* `function.name`: The name of the schema to output.
* `function.description`: A high level description of the schema to output.
* `function.parameters`: The nested details of the schema you want to extract, formatted as a [JSON schema](https://json-schema.org/) dict.

We can bind this model-specific format directly to the model as well if preferred. Here's an example:

```python
from langchain_openai import ChatOpenAI

model = ChatOpenAI()
model_with_tools = model.bind(
    tools=[
        {
            "type": "function",
            "function": {
                "name": "multiply",
                "description": "Multiply two integers together.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "a": {"type": "number", "description": "First integer"},
                        "b": {"type": "number", "description": "Second integer"},
                    },
                    "required": ["a", "b"],
                },
            },
        }
    ]
)
model_with_tools.invoke("Whats 119 times 8?")
```

**API Reference:** [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)

```
AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_mn4ELw1NbuE0DFYhIeK0GrPe', 'function': {'arguments': '{"a":119,"b":8}', 'name': 'multiply'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 62, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-353e8a9a-7125-4f94-8c68-4f3da4c21120-0', tool_calls=[{'name': 'multiply', 'args': {'a': 119, 'b': 8}, 'id': 'call_mn4ELw1NbuE0DFYhIeK0GrPe'}])
```

This is functionally equivalent to the `bind_tools()` method.
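For comparison, here is a minimal sketch (not from the original page) of the equivalent `bind_tools()` call using a decorated Python function; the `multiply` tool below is illustrative:

```python
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers together."""
    return a * b


# bind_tools() converts the function's name, docstring, and signature
# into the provider-specific schema shown above.
model_with_tools = ChatOpenAI().bind_tools([multiply])
model_with_tools.invoke("Whats 119 times 8?")
```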
https://python.langchain.com/v0.2/docs/how_to/prompts_partial/
How to partially format prompt templates
========================================

Prerequisites

This guide assumes familiarity with the following concepts:

* [Prompt templates](/v0.2/docs/concepts/#prompt-templates)

Like partially binding arguments to a function, it can make sense to "partial" a prompt template - e.g. pass in a subset of the required values, so as to create a new prompt template which expects only the remaining subset of values.

LangChain supports this in two ways:

1. Partial formatting with string values.
2. Partial formatting with functions that return string values.

In the examples below, we go over the motivations for both use cases as well as how to do it in LangChain.

Partial with strings
--------------------

One common use case for wanting to partial a prompt template is if you get access to some of the variables in a prompt before others. For example, suppose you have a prompt template that requires two variables, `foo` and `baz`. If you get the `foo` value early on in your chain, but the `baz` value later, it can be inconvenient to pass both variables all the way through the chain. Instead, you can partial the prompt template with the `foo` value, and then pass the partialed prompt template along and just use that. Below is an example of doing this:

```python
from langchain_core.prompts import PromptTemplate

prompt = PromptTemplate.from_template("{foo}{bar}")
partial_prompt = prompt.partial(foo="foo")
print(partial_prompt.format(bar="baz"))
```

**API Reference:** [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html)

```
foobaz
```

You can also just initialize the prompt with the partialed variables.

```python
prompt = PromptTemplate(
    template="{foo}{bar}", input_variables=["bar"], partial_variables={"foo": "foo"}
)
print(prompt.format(bar="baz"))
```

```
foobaz
```

Partial with functions
----------------------

The other common use is to partial with a function. The use case for this is when you have a variable you know you always want to fetch in a common way. A prime example of this is with date or time. Imagine you have a prompt which you always want to have the current date. You can't hard code it in the prompt, and passing it along with the other input variables is inconvenient. In this case, it's handy to be able to partial the prompt with a function that always returns the current date.

```python
from datetime import datetime


def _get_datetime():
    now = datetime.now()
    return now.strftime("%m/%d/%Y, %H:%M:%S")


prompt = PromptTemplate(
    template="Tell me a {adjective} joke about the day {date}",
    input_variables=["adjective", "date"],
)
partial_prompt = prompt.partial(date=_get_datetime)
print(partial_prompt.format(adjective="funny"))
```

```
Tell me a funny joke about the day 04/21/2024, 19:43:57
```
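The same `.partial()` pattern appears to work for chat prompt templates as well; the following is our own minimal sketch (not from the original page), assuming function-valued partials are resolved at format time for `ChatPromptTemplate` just as they are for `PromptTemplate`:

```python
from langchain_core.prompts import ChatPromptTemplate

chat_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Today is {date}. Answer in a {adjective} tone."),
        ("human", "{question}"),
    ]
)
# Partial the date with a function so it is re-evaluated each time the
# prompt is formatted (reusing _get_datetime from above).
partial_chat_prompt = chat_prompt.partial(date=_get_datetime)
print(partial_chat_prompt.format_messages(adjective="cheerful", question="hi"))
```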
You can also just initialize the prompt with the partialed variables, which often makes more sense in this workflow.

```python
prompt = PromptTemplate(
    template="Tell me a {adjective} joke about the day {date}",
    input_variables=["adjective"],
    partial_variables={"date": _get_datetime},
)
print(prompt.format(adjective="funny"))
```

```
Tell me a funny joke about the day 04/21/2024, 19:43:57
```

Next steps
----------

You've now learned how to partially apply variables to your prompt templates.

Next, check out the other how-to guides on prompt templates in this section, like [adding few-shot examples to your prompt templates](/v0.2/docs/how_to/few_shot_examples_chat/).
https://python.langchain.com/v0.2/docs/how_to/tools_human/
How to add a human-in-the-loop for tools
========================================

There are certain tools that we don't trust a model to execute on its own. One thing we can do in such situations is require human approval before the tool is invoked.

info

This how-to guide shows a simple way to add a human-in-the-loop for code running in a Jupyter notebook or in a terminal.

To build a production application, you will need to do more work to keep track of application state appropriately. We recommend using `langgraph` for powering such a capability. For more details, please see this [guide](https://langchain-ai.github.io/langgraph/how-tos/human-in-the-loop/).

Setup
-----

We'll need to install the following packages:

```python
%pip install --upgrade --quiet langchain
```

And set these environment variables:

```python
import getpass
import os

# If you'd like to use LangSmith, uncomment the below:
# os.environ["LANGCHAIN_TRACING_V2"] = "true"
# os.environ["LANGCHAIN_API_KEY"] = getpass.getpass()
```

Chain
-----

Let's create a few simple (dummy) tools and a tool-calling chain. First, pick a chat model provider and initialize `llm`:

**OpenAI**

```
pip install -qU langchain-openai
```

```python
import getpass
import os

os.environ["OPENAI_API_KEY"] = getpass.getpass()

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
```

**Anthropic**

```
pip install -qU langchain-anthropic
```

```python
import getpass
import os

os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()

from langchain_anthropic import ChatAnthropic

llm = ChatAnthropic(model="claude-3-sonnet-20240229")
```

**Azure**

```
pip install -qU langchain-openai
```

```python
import getpass
import os

os.environ["AZURE_OPENAI_API_KEY"] = getpass.getpass()

from langchain_openai import AzureChatOpenAI

llm = AzureChatOpenAI(
    azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
    azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"],
    openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],
)
```

**Google**

```
pip install -qU langchain-google-vertexai
```

```python
import getpass
import os

os.environ["GOOGLE_API_KEY"] = getpass.getpass()

from langchain_google_vertexai import ChatVertexAI

llm = ChatVertexAI(model="gemini-pro")
```

**Cohere**

```
pip install -qU langchain-cohere
```

```python
import getpass
import os

os.environ["COHERE_API_KEY"] = getpass.getpass()

from langchain_cohere import ChatCohere

llm = ChatCohere(model="command-r")
```

**FireworksAI**

```
pip install -qU langchain-fireworks
```

```python
import getpass
import os

os.environ["FIREWORKS_API_KEY"] = getpass.getpass()

from langchain_fireworks import ChatFireworks

llm = ChatFireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
```

**Groq**

```
pip install -qU langchain-groq
```

```python
import getpass
import os

os.environ["GROQ_API_KEY"] = getpass.getpass()

from langchain_groq import ChatGroq

llm = ChatGroq(model="llama3-8b-8192")
```

**MistralAI**

```
pip install -qU langchain-mistralai
```

```python
import getpass
import os

os.environ["MISTRAL_API_KEY"] = getpass.getpass()

from langchain_mistralai import ChatMistralAI

llm = ChatMistralAI(model="mistral-large-latest")
```

**TogetherAI**

```
pip install -qU langchain-openai
```

```python
import getpass
import os

os.environ["TOGETHER_API_KEY"] = getpass.getpass()

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    base_url="https://api.together.xyz/v1",
    api_key=os.environ["TOGETHER_API_KEY"],
    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
)
```

Now define the dummy tools and the chain:

```python
from typing import Dict, List

from langchain_core.messages import AIMessage
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import tool


@tool
def count_emails(last_n_days: int) -> int:
    """Dummy function to count the emails received in the last n days."""
    return last_n_days * 2


@tool
def send_email(message: str, recipient: str) -> str:
    """Dummy function to send an email to a recipient."""
    return f"Successfully sent email to {recipient}."


tools = [count_emails, send_email]
llm_with_tools = llm.bind_tools(tools)


def call_tools(msg: AIMessage) -> List[Dict]:
    """Simple sequential tool calling helper."""
    tool_map = {tool.name: tool for tool in tools}
    tool_calls = msg.tool_calls.copy()
    for tool_call in tool_calls:
        tool_call["output"] = tool_map[tool_call["name"]].invoke(tool_call["args"])
    return tool_calls


chain = llm_with_tools | call_tools
chain.invoke("how many emails did i get in the last 5 days?")
```

**API Reference:** [AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) | [Runnable](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) | [tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html)

```
[{'name': 'count_emails', 'args': {'last_n_days': 5}, 'id': 'toolu_01QYZdJ4yPiqsdeENWHqioFW', 'output': 10}]
```

Adding human approval
---------------------

Let's add a step in the chain that will ask a person to approve or reject the tool call request. On rejection, the step will raise an exception which will stop execution of the rest of the chain.

```python
import json


class NotApproved(Exception):
    """Custom exception."""


def human_approval(msg: AIMessage) -> AIMessage:
    """Responsible for passing through its input or raising an exception.

    Args:
        msg: output from the chat model

    Returns:
        msg: the original output from the chat model, unchanged
    """
    tool_strs = "\n\n".join(
        json.dumps(tool_call, indent=2) for tool_call in msg.tool_calls
    )
    input_msg = (
        f"Do you approve of the following tool invocations\n\n{tool_strs}\n\n"
        "Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\n >>>"
    )
    resp = input(input_msg)
    if resp.lower() not in ("yes", "y"):
        raise NotApproved(f"Tool invocations not approved:\n\n{tool_strs}")
    return msg
```

```python
chain = llm_with_tools | human_approval | call_tools
chain.invoke("how many emails did i get in the last 5 days?")
```

```
Do you approve of the following tool invocations

{
  "name": "count_emails",
  "args": {
    "last_n_days": 5
  },
  "id": "toolu_01WbD8XeMoQaRFtsZezfsHor"
}

Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.
 >>> yes
```

```
[{'name': 'count_emails', 'args': {'last_n_days': 5}, 'id': 'toolu_01WbD8XeMoQaRFtsZezfsHor', 'output': 10}]
```

```python
try:
    chain.invoke("Send [email protected] an email saying 'What's up homie'")
except NotApproved as e:
    print()
    print(e)
```

```
Do you approve of the following tool invocations

{
  "name": "send_email",
  "args": {
    "recipient": "[email protected]",
    "message": "What's up homie"
  },
  "id": "toolu_014XccHFzBiVcc9GV1harV9U"
}

Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.
 >>> no
```

```
Tool invocations not approved:

{
  "name": "send_email",
  "args": {
    "recipient": "[email protected]",
    "message": "What's up homie"
  },
  "id": "toolu_014XccHFzBiVcc9GV1harV9U"
}
```
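One possible refinement, sketched here on top of the objects defined above (it is not part of the original guide): only prompt for approval on tools you consider sensitive, and let the rest pass through automatically. The `SENSITIVE_TOOLS` set below is a hypothetical policy.

```python
SENSITIVE_TOOLS = {"send_email"}  # hypothetical policy: only gate outbound email


def selective_human_approval(msg: AIMessage) -> AIMessage:
    """Ask for approval only when a tool call targets a sensitive tool."""
    sensitive_calls = [tc for tc in msg.tool_calls if tc["name"] in SENSITIVE_TOOLS]
    if not sensitive_calls:
        return msg
    tool_strs = "\n\n".join(json.dumps(tc, indent=2) for tc in sensitive_calls)
    resp = input(f"Approve these sensitive tool invocations?\n\n{tool_strs}\n >>> ")
    if resp.lower() not in ("yes", "y"):
        raise NotApproved(f"Tool invocations not approved:\n\n{tool_strs}")
    return msg


chain = llm_with_tools | selective_human_approval | call_tools
```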
https://python.langchain.com/v0.2/docs/how_to/trim_messages/
How to trim messages
====================

Prerequisites

This guide assumes familiarity with the following concepts:

* [Messages](/v0.2/docs/concepts/#messages)
* [Chat models](/v0.2/docs/concepts/#chat-models)
* [Chaining](/v0.2/docs/how_to/sequence/)
* [Chat history](/v0.2/docs/concepts/#chat-history)

The methods in this guide also require `langchain-core>=0.2.9`.

All models have finite context windows, meaning there's a limit to how many tokens they can take as input. If you have very long messages or a chain/agent that accumulates a long message history, you'll need to manage the length of the messages you're passing in to the model.

The `trim_messages` util provides some basic strategies for trimming a list of messages to be of a certain token length.

Getting the last `max_tokens` tokens
------------------------------------

To get the last `max_tokens` in the list of messages we can set `strategy="last"`. Notice that for our `token_counter` we can pass in a function (more on that below) or a language model (since language models have a message token counting method). It makes sense to pass in a model when you're trimming your messages to fit into the context window of that specific model:

```python
# pip install -U langchain-openai
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    trim_messages,
)
from langchain_openai import ChatOpenAI

messages = [
    SystemMessage("you're a good assistant, you always respond with a joke."),
    HumanMessage("i wonder why it's called langchain"),
    AIMessage(
        'Well, I guess they thought "WordRope" and "SentenceString" just didn\'t have the same ring to it!'
    ),
    HumanMessage("and who is harrison chasing anyways"),
    AIMessage(
        "Hmmm let me think.\n\nWhy, he's probably chasing after the last cup of coffee in the office!"
    ),
    HumanMessage("what do you call a speechless parrot"),
]

trim_messages(
    messages,
    max_tokens=45,
    strategy="last",
    token_counter=ChatOpenAI(model="gpt-4o"),
)
```

**API Reference:** [AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [SystemMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html) | [trim_messages](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.trim_messages.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)

```
[AIMessage(content="Hmmm let me think.\n\nWhy, he's probably chasing after the last cup of coffee in the office!"), HumanMessage(content='what do you call a speechless parrot')]
```

If we want to always keep the initial system message we can specify `include_system=True`:

```python
trim_messages(
    messages,
    max_tokens=45,
    strategy="last",
    token_counter=ChatOpenAI(model="gpt-4o"),
    include_system=True,
)
```

```
[SystemMessage(content="you're a good assistant, you always respond with a joke."), HumanMessage(content='what do you call a speechless parrot')]
```

If we want to allow splitting up the contents of a message we can specify `allow_partial=True`:

```python
trim_messages(
    messages,
    max_tokens=56,
    strategy="last",
    token_counter=ChatOpenAI(model="gpt-4o"),
    include_system=True,
    allow_partial=True,
)
```

```
[SystemMessage(content="you're a good assistant, you always respond with a joke."), AIMessage(content="\nWhy, he's probably chasing after the last cup of coffee in the office!"), HumanMessage(content='what do you call a speechless parrot')]
```

If we need to make sure that our first message (excluding the system message) is always of a specific type, we can specify `start_on`:

```python
trim_messages(
    messages,
    max_tokens=60,
    strategy="last",
    token_counter=ChatOpenAI(model="gpt-4o"),
    include_system=True,
    start_on="human",
)
```

```
[SystemMessage(content="you're a good assistant, you always respond with a joke."), HumanMessage(content='what do you call a speechless parrot')]
```

Getting the first `max_tokens` tokens
-------------------------------------

We can perform the flipped operation of getting the _first_ `max_tokens` by specifying `strategy="first"`:

```python
trim_messages(
    messages,
    max_tokens=45,
    strategy="first",
    token_counter=ChatOpenAI(model="gpt-4o"),
)
```

```
[SystemMessage(content="you're a good assistant, you always respond with a joke."), HumanMessage(content="i wonder why it's called langchain")]
```

Writing a custom token counter
------------------------------

We can write a custom token counter function that takes in a list of messages and returns an int.
```python
from typing import List

# pip install tiktoken
import tiktoken
from langchain_core.messages import BaseMessage, ToolMessage


def str_token_counter(text: str) -> int:
    enc = tiktoken.get_encoding("o200k_base")
    return len(enc.encode(text))


def tiktoken_counter(messages: List[BaseMessage]) -> int:
    """Approximately reproduce https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb

    For simplicity only supports str Message.contents.
    """
    num_tokens = 3  # every reply is primed with <|start|>assistant<|message|>
    tokens_per_message = 3
    tokens_per_name = 1
    for msg in messages:
        if isinstance(msg, HumanMessage):
            role = "user"
        elif isinstance(msg, AIMessage):
            role = "assistant"
        elif isinstance(msg, ToolMessage):
            role = "tool"
        elif isinstance(msg, SystemMessage):
            role = "system"
        else:
            raise ValueError(f"Unsupported messages type {msg.__class__}")
        num_tokens += (
            tokens_per_message
            + str_token_counter(role)
            + str_token_counter(msg.content)
        )
        if msg.name:
            num_tokens += tokens_per_name + str_token_counter(msg.name)
    return num_tokens


trim_messages(
    messages,
    max_tokens=45,
    strategy="last",
    token_counter=tiktoken_counter,
)
```

**API Reference:** [BaseMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.base.BaseMessage.html) | [ToolMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessage.html)

```
[AIMessage(content="Hmmm let me think.\n\nWhy, he's probably chasing after the last cup of coffee in the office!"), HumanMessage(content='what do you call a speechless parrot')]
```

Chaining
--------

`trim_messages` can be used imperatively (like above) or declaratively, making it easy to compose with other components in a chain:

```python
llm = ChatOpenAI(model="gpt-4o")

# Notice we don't pass in messages. This creates
# a RunnableLambda that takes messages as input
trimmer = trim_messages(
    max_tokens=45,
    strategy="last",
    token_counter=llm,
    include_system=True,
)

chain = trimmer | llm
chain.invoke(messages)
```

```
AIMessage(content='A: A "Polly-gone"!', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 32, 'total_tokens': 41}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_66b29dffce', 'finish_reason': 'stop', 'logprobs': None}, id='run-83e96ddf-bcaa-4f63-824c-98b0f8a0d474-0', usage_metadata={'input_tokens': 32, 'output_tokens': 9, 'total_tokens': 41})
```

Looking at the LangSmith trace we can see that before the messages are passed to the model they are first trimmed: [https://smith.langchain.com/public/65af12c4-c24d-4824-90f0-6547566e59bb/r](https://smith.langchain.com/public/65af12c4-c24d-4824-90f0-6547566e59bb/r)

Looking at just the trimmer, we can see that it's a Runnable object that can be invoked like all Runnables:

```python
trimmer.invoke(messages)
```

```
[SystemMessage(content="you're a good assistant, you always respond with a joke."), HumanMessage(content='what do you call a speechless parrot')]
```

Using with ChatMessageHistory
-----------------------------

Trimming messages is especially useful when [working with chat histories](/v0.2/docs/how_to/message_history/), which can get arbitrarily long:

```python
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory

chat_history = InMemoryChatMessageHistory(messages=messages[:-1])


def dummy_get_session_history(session_id):
    if session_id != "1":
        return InMemoryChatMessageHistory()
    return chat_history


llm = ChatOpenAI(model="gpt-4o")

trimmer = trim_messages(
    max_tokens=45,
    strategy="last",
    token_counter=llm,
    include_system=True,
)

chain = trimmer | llm
chain_with_history = RunnableWithMessageHistory(chain, dummy_get_session_history)
chain_with_history.invoke(
    [HumanMessage("what do you call a speechless parrot")],
    config={"configurable": {"session_id": "1"}},
)
```

**API Reference:** [InMemoryChatMessageHistory](https://api.python.langchain.com/en/latest/chat_history/langchain_core.chat_history.InMemoryChatMessageHistory.html) | [RunnableWithMessageHistory](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html)

```
AIMessage(content='A "polly-no-wanna-cracker"!', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 32, 'total_tokens': 42}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_5bf7397cd3', 'finish_reason': 'stop', 'logprobs': None}, id='run-054dd309-3497-4e7b-b22a-c1859f11d32e-0', usage_metadata={'input_tokens': 32, 'output_tokens': 10, 'total_tokens': 42})
```

Looking at the LangSmith trace we can see that we retrieve all of our messages, but before the messages are passed to the model they are trimmed to be just the system message and last human message: [https://smith.langchain.com/public/17dd700b-9994-44ca-930c-116e00997315/r](https://smith.langchain.com/public/17dd700b-9994-44ca-930c-116e00997315/r)
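One more note on custom token counters: if you would rather avoid a tokenizer dependency entirely, any function that maps a list of messages to an int satisfies the `token_counter` contract. Below is our own rough sketch (not from the original guide); the 4-characters-per-token ratio and the per-message overhead are only heuristics:

```python
def approximate_token_counter(messages: List[BaseMessage]) -> int:
    # Very rough heuristic: ~4 characters per token, plus a small
    # per-message allowance for role/formatting tokens.
    per_message_overhead = 4
    total_chars = sum(len(str(m.content)) for m in messages)
    return total_chars // 4 + per_message_overhead * len(messages)


trim_messages(
    messages,
    max_tokens=45,
    strategy="last",
    token_counter=approximate_token_counter,
)
```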
API reference
-------------

For a complete description of all arguments head to the API reference: [https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.trim_messages.html](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.trim_messages.html)
https://python.langchain.com/v0.2/docs/how_to/tools_builtin/
How to use built-in tools and toolkits
======================================

Prerequisites

This guide assumes familiarity with the following concepts:

* [LangChain Tools](/v0.2/docs/concepts/#tools)
* [LangChain Toolkits](/v0.2/docs/concepts/#tools)

Tools
-----

LangChain has a large collection of 3rd party tools. Please visit [Tool Integrations](/v0.2/docs/integrations/tools/) for a list of the available tools.

info

When using 3rd party tools, make sure that you understand how the tool works and what permissions it has. Read over its documentation and check if anything is required from you from a security point of view. Please see our [security](https://python.langchain.com/v0.2/docs/security/) guidelines for more information.

Let's try out the [Wikipedia integration](/v0.2/docs/integrations/tools/wikipedia/).

```python
!pip install -qU wikipedia
```

```python
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper

api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)
tool = WikipediaQueryRun(api_wrapper=api_wrapper)
print(tool.invoke({"query": "langchain"}))
```

**API Reference:** [WikipediaQueryRun](https://api.python.langchain.com/en/latest/tools/langchain_community.tools.wikipedia.tool.WikipediaQueryRun.html) | [WikipediaAPIWrapper](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.wikipedia.WikipediaAPIWrapper.html)

```
Page: LangChain
Summary: LangChain is a framework designed to simplify the creation of applications
```

The tool has the following defaults associated with it:

```python
print(f"Name: {tool.name}")
print(f"Description: {tool.description}")
print(f"args schema: {tool.args}")
print(f"returns directly?: {tool.return_direct}")
```

```
Name: wiki-tool
Description: look up things in wikipedia
args schema: {'query': {'title': 'Query', 'description': 'query to look up in Wikipedia, should be 3 or less words', 'type': 'string'}}
returns directly?: True
```

Customizing Default Tools
-------------------------

We can also modify the built in name, description, and JSON schema of the arguments.

When defining the JSON schema of the arguments, it is important that the inputs remain the same as the function, so you shouldn't change that. But you can define custom descriptions for each input easily.
```python
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_core.pydantic_v1 import BaseModel, Field


class WikiInputs(BaseModel):
    """Inputs to the wikipedia tool."""

    query: str = Field(
        description="query to look up in Wikipedia, should be 3 or less words"
    )


tool = WikipediaQueryRun(
    name="wiki-tool",
    description="look up things in wikipedia",
    args_schema=WikiInputs,
    api_wrapper=api_wrapper,
    return_direct=True,
)

print(tool.run("langchain"))
```

**API Reference:** [WikipediaQueryRun](https://api.python.langchain.com/en/latest/tools/langchain_community.tools.wikipedia.tool.WikipediaQueryRun.html) | [WikipediaAPIWrapper](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.wikipedia.WikipediaAPIWrapper.html)

```
Page: LangChain
Summary: LangChain is a framework designed to simplify the creation of applications
```

```python
print(f"Name: {tool.name}")
print(f"Description: {tool.description}")
print(f"args schema: {tool.args}")
print(f"returns directly?: {tool.return_direct}")
```

```
Name: wiki-tool
Description: look up things in wikipedia
args schema: {'query': {'title': 'Query', 'description': 'query to look up in Wikipedia, should be 3 or less words', 'type': 'string'}}
returns directly?: True
```

How to use built-in toolkits
----------------------------

Toolkits are collections of tools that are designed to be used together for specific tasks. They have convenient loading methods.

For a complete list of available ready-made toolkits, visit [Integrations](/v0.2/docs/integrations/toolkits/).

All Toolkits expose a `get_tools` method which returns a list of tools. You're usually meant to use them this way:

```python
# Initialize a toolkit
toolkit = ExampleToolkit(...)

# Get list of tools
tools = toolkit.get_tools()
```
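As a concrete illustration of that pattern (our own sketch, not from this page), the SQL database toolkit from `langchain-community` can be loaded like this, assuming you have a SQLAlchemy-compatible database and a chat model available:

```python
from langchain_community.agent_toolkits import SQLDatabaseToolkit
from langchain_community.utilities import SQLDatabase
from langchain_openai import ChatOpenAI

# Assumes a local SQLite file for illustration; any SQLAlchemy URI works here.
db = SQLDatabase.from_uri("sqlite:///example.db")
toolkit = SQLDatabaseToolkit(db=db, llm=ChatOpenAI(model="gpt-3.5-turbo-0125"))
tools = toolkit.get_tools()
```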
https://python.langchain.com/v0.2/docs/how_to/vectorstores/
How to create and query vector stores
=====================================

info

Head to [Integrations](/v0.2/docs/integrations/vectorstores/) for documentation on built-in integrations with 3rd-party vector stores.

One of the most common ways to store and search over unstructured data is to embed it and store the resulting embedding vectors, and then at query time to embed the unstructured query and retrieve the embedding vectors that are 'most similar' to the embedded query. A vector store takes care of storing embedded data and performing vector search for you.

Get started
-----------

This guide showcases basic functionality related to vector stores. A key part of working with vector stores is creating the vector to put in them, which is usually created via embeddings. Therefore, it is recommended that you familiarize yourself with the [text embedding model interfaces](/v0.2/docs/how_to/embed_text/) before diving into this.

Before using the vectorstore at all, we need to load some data and initialize an embedding model. We want to use OpenAIEmbeddings, so we have to get the OpenAI API key.

```python
import os
import getpass

os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')
```

```python
from langchain_community.document_loaders import TextLoader
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter

# Load the document, split it into chunks, embed each chunk and load it into the vector store.
raw_documents = TextLoader('state_of_the_union.txt').load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(raw_documents)
```

**API Reference:** [TextLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.text.TextLoader.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) | [CharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.CharacterTextSplitter.html)

There are many great vector store options; here are a few that are free, open-source, and run entirely on your local machine. Review all integrations for many great hosted offerings.

* Chroma
* FAISS
* Lance

This walkthrough uses the `chroma` vector database, which runs on your local machine as a library.

```
pip install langchain-chroma
```

```python
from langchain_chroma import Chroma

db = Chroma.from_documents(documents, OpenAIEmbeddings())
```

This walkthrough uses the `FAISS` vector database, which makes use of the Facebook AI Similarity Search (FAISS) library.

```
pip install faiss-cpu
```

```python
from langchain_community.vectorstores import FAISS

db = FAISS.from_documents(documents, OpenAIEmbeddings())
```

**API Reference:** [FAISS](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html)

This walkthrough shows how to use functionality related to the LanceDB vector database, based on the Lance data format.
```
pip install lancedb
```

```python
import lancedb
from langchain_community.vectorstores import LanceDB

# Embedding model used to populate the example table (required by embed_query below).
embeddings = OpenAIEmbeddings()

db = lancedb.connect("/tmp/lancedb")
table = db.create_table(
    "my_table",
    data=[
        {
            "vector": embeddings.embed_query("Hello World"),
            "text": "Hello World",
            "id": "1",
        }
    ],
    mode="overwrite",
)
db = LanceDB.from_documents(documents, OpenAIEmbeddings())
```

**API Reference:** [LanceDB](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.lancedb.LanceDB.html)

Similarity search
-----------------

All vectorstores expose a `similarity_search` method. This takes an incoming query, creates an embedding of it, and then finds the documents whose embeddings are most similar.

```python
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].page_content)
```

```
Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections.

Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service.

One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.

And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.
```

### Similarity search by vector

It is also possible to do a search for documents similar to a given embedding vector using `similarity_search_by_vector`, which accepts an embedding vector as a parameter instead of a string.

```python
embedding_vector = OpenAIEmbeddings().embed_query(query)
docs = db.similarity_search_by_vector(embedding_vector)
print(docs[0].page_content)
```

```
Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections.

Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service.

One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.

And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.
```

Async Operations
----------------

Vector stores are usually run as a separate service that requires some IO operations, and therefore they might be called asynchronously. That gives performance benefits, as you don't waste time waiting for responses from external services. That might also be important if you work with an asynchronous framework, such as [FastAPI](https://fastapi.tiangolo.com/).

LangChain supports async operation on vector stores.
All the methods might be called using their async counterparts, with the prefix `a`, meaning `async`.

```python
docs = await db.asimilarity_search(query)
docs
```

```
[Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': 'state_of_the_union.txt'}),
 Document(page_content='A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n\nAnd if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \n\nWe can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. \n\nWe’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \n\nWe’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \n\nWe’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.', metadata={'source': 'state_of_the_union.txt'}),
 Document(page_content='And for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. \n\nAs I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \n\nWhile it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. \n\nAnd soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. \n\nSo tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. \n\nFirst, beat the opioid epidemic.', metadata={'source': 'state_of_the_union.txt'}),
 Document(page_content='Tonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers. \n\nAnd as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up. \n\nThat ends on my watch. \n\nMedicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect. \n\nWe’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees. \n\nLet’s pass the Paycheck Fairness Act and paid leave. \n\nRaise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty. \n\nLet’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges.', metadata={'source': 'state_of_the_union.txt'})]
```
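In larger chains, vector stores are usually consumed through the retriever interface. Here is a minimal sketch of ours (not part of the original page) using the `db` built above; the `k` value is just an example:

```python
# Expose the vector store through the standard retriever interface,
# returning the top 2 most similar chunks per query.
retriever = db.as_retriever(search_kwargs={"k": 2})
retrieved_docs = retriever.invoke("What did the president say about Ketanji Brown Jackson")
```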
https://python.langchain.com/v0.2/docs/how_to/passthrough/
How to pass through arguments from one step to the next
========================================================

Prerequisites

This guide assumes familiarity with the following concepts:

* [LangChain Expression Language (LCEL)](/v0.2/docs/concepts/#langchain-expression-language)
* [Chaining runnables](/v0.2/docs/how_to/sequence/)
* [Calling runnables in parallel](/v0.2/docs/how_to/parallel/)
* [Custom functions](/v0.2/docs/how_to/functions/)

When composing chains with several steps, sometimes you will want to pass data from previous steps unchanged for use as input to a later step. The [`RunnablePassthrough`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) class allows you to do just this, and is typically used in conjunction with a [RunnableParallel](/v0.2/docs/how_to/parallel/) to pass data through to a later step in your constructed chains.

See the example below:

```python
%pip install -qU langchain langchain-openai

import os
from getpass import getpass

os.environ["OPENAI_API_KEY"] = getpass()
```

```python
from langchain_core.runnables import RunnableParallel, RunnablePassthrough

runnable = RunnableParallel(
    passed=RunnablePassthrough(),
    modified=lambda x: x["num"] + 1,
)

runnable.invoke({"num": 1})
```

**API Reference:** [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html)

```
{'passed': {'num': 1}, 'modified': 2}
```

As seen above, the `passed` key was called with `RunnablePassthrough()`, so it simply passed on `{'num': 1}`.

We also set a second key in the map with `modified`. This uses a lambda to set a single value by adding 1 to the num, which resulted in the `modified` key having a value of `2`.
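A closely related pattern, sketched here by us rather than taken from this page, is `RunnablePassthrough.assign`, which keeps the whole input dict and adds computed keys alongside it:

```python
from langchain_core.runnables import RunnablePassthrough

# Keeps every key of the input dict and adds a computed "modified" key,
# so the result contains both "num" and "modified".
runnable = RunnablePassthrough.assign(modified=lambda x: x["num"] + 1)
runnable.invoke({"num": 1})
```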
Retrieval Example
-----------------

In the example below, we see a more real-world use case where we use `RunnablePassthrough` along with `RunnableParallel` in a chain to properly format inputs to a prompt:

```python
from langchain_community.vectorstores import FAISS
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

vectorstore = FAISS.from_texts(
    ["harrison worked at kensho"], embedding=OpenAIEmbeddings()
)
retriever = vectorstore.as_retriever()
template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI()

retrieval_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)

retrieval_chain.invoke("where did harrison work?")
```

**API Reference:** [FAISS](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html) | [StrOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.string.StrOutputParser.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html)

```
'Harrison worked at Kensho.'
```

Here the input to prompt is expected to be a map with keys "context" and "question". The user input is just the question, so we need to get the context using our retriever and pass the user input through under the "question" key. The `RunnablePassthrough` allows us to pass on the user's question to the prompt and model.

Next steps
----------

Now you've learned how to pass data through your chains to help format the data flowing through them.

To learn more, see the other how-to guides on runnables in this section.
https://python.langchain.com/v0.2/docs/how_to/prompts_composition/
How to compose prompts together
===============================

Prerequisites

This guide assumes familiarity with the following concepts:

* [Prompt templates](/v0.2/docs/concepts/#prompt-templates)

LangChain provides a user friendly interface for composing different parts of prompts together. You can do this with either string prompts or chat prompts. Constructing prompts this way allows for easy reuse of components.

String prompt composition
-------------------------

When working with string prompts, each template is joined together. You can work with either prompts directly or strings (the first element in the list needs to be a prompt).

```python
from langchain_core.prompts import PromptTemplate

prompt = (
    PromptTemplate.from_template("Tell me a joke about {topic}")
    + ", make it funny"
    + "\n\nand in {language}"
)
prompt
```

**API Reference:** [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html)

```
PromptTemplate(input_variables=['language', 'topic'], template='Tell me a joke about {topic}, make it funny\n\nand in {language}')
```

```python
prompt.format(topic="sports", language="spanish")
```

```
'Tell me a joke about sports, make it funny\n\nand in spanish'
```

Chat prompt composition
-----------------------

A chat prompt is made up of a list of messages. Similarly to the above example, we can concatenate chat prompt templates. Each new element is a new message in the final prompt.

First, let's initialize a [`ChatPromptTemplate`](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) with a [`SystemMessage`](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html).

```python
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage

prompt = SystemMessage(content="You are a nice pirate")
```

**API Reference:** [AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [SystemMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html)

You can then easily create a pipeline combining it with other messages _or_ message templates. Use a `Message` when there are no variables to be formatted, and a `MessageTemplate` when there are variables to be formatted. You can also use just a string (note: this will automatically get inferred as a [`HumanMessagePromptTemplate`](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.HumanMessagePromptTemplate.html)).

```python
new_prompt = (
    prompt + HumanMessage(content="hi") + AIMessage(content="what?") + "{input}"
)
```

Under the hood, this creates an instance of the ChatPromptTemplate class, so you can use it just as you did before!
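For instance (our own sketch, not from the original page), the composed template can head a chain like any other `ChatPromptTemplate`; the model choice below is arbitrary:

```python
from langchain_openai import ChatOpenAI

# new_prompt behaves like any ChatPromptTemplate, so it can be piped into a model.
chain = new_prompt | ChatOpenAI(model="gpt-3.5-turbo-0125")
chain.invoke({"input": "i said hi"})
```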
You can also format the messages directly:

```python
new_prompt.format_messages(input="i said hi")
```

```
[SystemMessage(content='You are a nice pirate'), HumanMessage(content='hi'), AIMessage(content='what?'), HumanMessage(content='i said hi')]
```

Using PipelinePrompt
--------------------

LangChain includes a class called [`PipelinePromptTemplate`](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.pipeline.PipelinePromptTemplate.html), which can be useful when you want to reuse parts of prompts. A PipelinePrompt consists of two main parts:

* Final prompt: The final prompt that is returned
* Pipeline prompts: A list of tuples, consisting of a string name and a prompt template. Each prompt template will be formatted and then passed to future prompt templates as a variable with the same name.

```python
from langchain_core.prompts import PipelinePromptTemplate, PromptTemplate

full_template = """{introduction}

{example}

{start}"""
full_prompt = PromptTemplate.from_template(full_template)

introduction_template = """You are impersonating {person}."""
introduction_prompt = PromptTemplate.from_template(introduction_template)

example_template = """Here's an example of an interaction:

Q: {example_q}
A: {example_a}"""
example_prompt = PromptTemplate.from_template(example_template)

start_template = """Now, do this for real!

Q: {input}
A:"""
start_prompt = PromptTemplate.from_template(start_template)

input_prompts = [
    ("introduction", introduction_prompt),
    ("example", example_prompt),
    ("start", start_prompt),
]
pipeline_prompt = PipelinePromptTemplate(
    final_prompt=full_prompt, pipeline_prompts=input_prompts
)

pipeline_prompt.input_variables
```

**API Reference:** [PipelinePromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.pipeline.PipelinePromptTemplate.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html)

```
['person', 'example_a', 'example_q', 'input']
```

```python
print(
    pipeline_prompt.format(
        person="Elon Musk",
        example_q="What's your favorite car?",
        example_a="Tesla",
        input="What's your favorite social media site?",
    )
)
```

```
You are impersonating Elon Musk.

Here's an example of an interaction:

Q: What's your favorite car?
A: Tesla

Now, do this for real!

Q: What's your favorite social media site?
A:
```

Next steps
----------

You've now learned how to compose prompts together.

Next, check out the other how-to guides on prompt templates in this section, like [adding few-shot examples to your prompt templates](/v0.2/docs/how_to/few_shot_examples_chat/).
https://python.langchain.com/v0.2/docs/versions/overview/
LangChain over time
===================

What’s new in LangChain?
------------------------

The following features have been added during the development of 0.1.x:

* Better streaming support via the [Event Streaming API](https://python.langchain.com/docs/expression_language/streaming/#using-stream-events).
* [Standardized tool calling support](https://blog.langchain.dev/tool-calling-with-langchain/)
* A standardized interface for [structuring output](https://github.com/langchain-ai/langchain/discussions/18154)
* [@chain decorator](https://python.langchain.com/docs/expression_language/how_to/decorator/) to more easily create **RunnableLambdas**
* [https://python.langchain.com/docs/expression_language/how_to/inspect/](https://python.langchain.com/docs/expression_language/how_to/inspect/)
* In Python, better async support for many core abstractions (thank you [@cbornet](https://github.com/cbornet)!!)
* Include response metadata in `AIMessage` to make it easy to access raw output from the underlying models
* Tooling to visualize [your runnables](https://python.langchain.com/docs/expression_language/how_to/inspect/) or [your langgraph app](https://github.com/langchain-ai/langgraph/blob/main/examples/visualization.ipynb)
* Interoperability of chat message histories across most providers
* [Over 20+ partner packages in Python](https://python.langchain.com/docs/integrations/platforms/) for popular integrations

What’s coming to LangChain?
---------------------------

* We’ve been working hard on [langgraph](https://langchain-ai.github.io/langgraph/). We will be building more capabilities on top of it and focusing on making it the go-to framework for agent architectures.
* Vectorstores V2! We’ll be revisiting our vectorstores abstractions to help improve usability and reliability.
* Better documentation and versioned docs!
* We’re planning a breaking release (0.3.0) sometime between July-September to [upgrade to full support of Pydantic 2](https://github.com/langchain-ai/langchain/discussions/19339), and will drop support for Pydantic 1 (including objects originating from the `v1` namespace of Pydantic 2).

What changed?
-------------

Due to the rapidly evolving field, LangChain has also evolved rapidly. This document serves to outline at a high level what has changed and why.

### TLDR

**As of 0.2.0:**

* This release completes the work that we started with release 0.1.0 by removing the dependency of `langchain` on `langchain-community`.
* The `langchain` package no longer requires `langchain-community`. Instead, `langchain-community` will now depend on `langchain-core` and `langchain`.
* User code that still relies on deprecated imports from `langchain` will continue to work as long as `langchain_community` is installed. These imports will start raising errors in release 0.4.x.

**As of 0.1.0:**

* `langchain` was split into the following component packages: `langchain-core`, `langchain`, `langchain-community`, `langchain-[partner]` to improve the usability of langchain code in production settings.
You can read more about it on our [blog](https://blog.langchain.dev/langchain-v0-1-0/).

### Ecosystem organization[​](#ecosystem-organization "Direct link to Ecosystem organization")

By the release of 0.1.0, LangChain had grown to a large ecosystem with many integrations and a large community. To improve the usability of LangChain in production, we split the single `langchain` package into multiple packages. This allowed us to create a good foundation architecture for the LangChain ecosystem and improve the usability of `langchain` in production.

Here is the high-level breakdown of the ecosystem:

* **langchain-core**: contains core abstractions involving LangChain Runnables, tooling for observability, and base implementations of important abstractions (e.g., Chat Models).
* **langchain**: contains generic code that is built using interfaces defined in `langchain-core`. This package is for code that generalizes well across different implementations of specific interfaces. For example, `create_tool_calling_agent` works across chat models that support [tool calling capabilities](https://blog.langchain.dev/tool-calling-with-langchain/).
* **langchain-community**: community-maintained 3rd party integrations. Contains integrations based on interfaces defined in **langchain-core**. Maintained by the LangChain community.
* **Partner Packages (e.g., langchain-\[partner\])**: Partner packages are packages dedicated to especially popular integrations (e.g., `langchain-openai`, `langchain-anthropic`, etc.). The dedicated packages generally benefit from better reliability and support.
* `langgraph`: Build robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
* `langserve`: Deploy LangChain chains as REST APIs.

In the 0.1.0 release, `langchain-community` was retained as a required dependency of `langchain`. This allowed imports of vectorstores, chat models, and other integrations to continue working through `langchain` rather than forcing users to update all of their imports to `langchain-community`.

For the 0.2.0 release, we're removing the dependency of `langchain` on `langchain-community`. This is something we've been planning to do since the 0.1 release because we believe this is the right package architecture. Old imports will continue to work as long as `langchain-community` is installed. These imports will be removed in the 0.4.0 release.

To understand why we think breaking the dependency of `langchain` on `langchain-community` is best, we should understand what each package is meant to do.

`langchain` is meant to contain high-level chains and agent architectures. The logic in these should be specified at the level of abstractions like `ChatModel` and `Retriever`, and should not be specific to any one integration. This has two main benefits:

1. `langchain` is fairly lightweight. Here is the full list of required dependencies (after the split):

        python = ">=3.8.1,<4.0"
        langchain-core = "^0.2.0"
        langchain-text-splitters = ">=0.0.1,<0.1"
        langsmith = "^0.1.17"
        pydantic = ">=1,<3"
        SQLAlchemy = ">=1.4,<3"
        requests = "^2"
        PyYAML = ">=5.3"
        numpy = "^1"
        aiohttp = "^3.8.3"
        tenacity = "^8.1.0"
        jsonpatch = "^1.33"

2. `langchain` chains/agents are largely integration-agnostic, which makes it easy to experiment with different integrations and future-proofs your code should there be issues with one specific integration.
There is also a third, less tangible benefit, which is that being integration-agnostic forces us to find only those very generic abstractions and architectures which generalize well across integrations. Given how general the abilities of the foundational tech are, and how quickly the space is moving, having generic architectures is a good way of future-proofing your applications.

`langchain-community` is intended to have all integration-specific components that are not yet being maintained in separate `langchain-{partner}` packages. Today this is still the majority of integrations and a lot of code. This code is primarily contributed by the community, while `langchain` is largely written by core maintainers. All of these integrations use optional dependencies and conditional imports, which prevents dependency bloat and conflicts but means compatible dependency versions are not made explicit. Given the volume of integrations in `langchain-community` and the speed at which integrations change, it's very hard to follow semver versioning, and we currently don't.

All of which is to say that there are no large benefits to `langchain` depending on `langchain-community`, and some obvious downsides: the functionality in `langchain` should be integration-agnostic anyway, `langchain-community` can't be properly versioned, and depending on `langchain-community` increases the [vulnerability surface](https://github.com/langchain-ai/langchain/discussions/19083) of `langchain`.

For more context about the reason for this organization, please see our blog: [https://blog.langchain.dev/langchain-v0-1-0/](https://blog.langchain.dev/langchain-v0-1-0/)
https://python.langchain.com/v0.2/docs/concepts/
* [](/v0.2/) * Conceptual guide On this page Conceptual guide ================ This section contains introductions to key parts of LangChain. Architecture[​](#architecture "Direct link to Architecture") ------------------------------------------------------------ LangChain as a framework consists of a number of packages. ### `langchain-core`[​](#langchain-core "Direct link to langchain-core") This package contains base abstractions of different components and ways to compose them together. The interfaces for core components like LLMs, vector stores, retrievers and more are defined here. No third party integrations are defined here. The dependencies are kept purposefully very lightweight. ### Partner packages[​](#partner-packages "Direct link to Partner packages") While the long tail of integrations are in `langchain-community`, we split popular integrations into their own packages (e.g. `langchain-openai`, `langchain-anthropic`, etc). This was done in order to improve support for these important integrations. ### `langchain`[​](#langchain "Direct link to langchain") The main `langchain` package contains chains, agents, and retrieval strategies that make up an application's cognitive architecture. These are NOT third party integrations. All chains, agents, and retrieval strategies here are NOT specific to any one integration, but rather generic across all integrations. ### `langchain-community`[​](#langchain-community "Direct link to langchain-community") This package contains third party integrations that are maintained by the LangChain community. Key partner packages are separated out (see below). This contains all integrations for various components (LLMs, vector stores, retrievers). All dependencies in this package are optional to keep the package as lightweight as possible. ### [`langgraph`](https://langchain-ai.github.io/langgraph)[​](#langgraph "Direct link to langgraph") `langgraph` is an extension of `langchain` aimed at building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. LangGraph exposes high level interfaces for creating common types of agents, as well as a low-level API for composing custom flows. ### [`langserve`](/v0.2/docs/langserve/)[​](#langserve "Direct link to langserve") A package to deploy LangChain chains as REST APIs. Makes it easy to get a production ready API up and running. ### [LangSmith](https://docs.smith.langchain.com)[​](#langsmith "Direct link to langsmith") A developer platform that lets you debug, test, evaluate, and monitor LLM applications. ![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](/v0.2/svg/langchain_stack.svg "LangChain Framework Overview")![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](/v0.2/svg/langchain_stack_dark.svg "LangChain Framework Overview") LangChain Expression Language (LCEL)[​](#langchain-expression-language-lcel "Direct link to LangChain Expression Language (LCEL)") ---------------------------------------------------------------------------------------------------------------------------------- LangChain Expression Language, or LCEL, is a declarative way to chain LangChain components. 
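As a quick illustration of this declarative style, here is a minimal "prompt + model + parser" chain. The sketch assumes the `langchain-openai` partner package and an `OPENAI_API_KEY` in the environment; any chat model integration would work the same way.

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

# Each component is a Runnable; `|` composes them into a single chain.
prompt = ChatPromptTemplate.from_template("Translate to French: {text}")
model = ChatOpenAI(model="gpt-4o")
chain = prompt | model | StrOutputParser()

print(chain.invoke({"text": "good morning"}))
```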
LCEL was designed from day 1 to **support putting prototypes in production, with no code changes**, from the simplest “prompt + LLM” chain to the most complex chains (we’ve seen folks successfully run LCEL chains with 100s of steps in production). To highlight a few of the reasons you might want to use LCEL: **First-class streaming support** When you build your chains with LCEL you get the best possible time-to-first-token (time elapsed until the first chunk of output comes out). For some chains this means eg. we stream tokens straight from an LLM to a streaming output parser, and you get back parsed, incremental chunks of output at the same rate as the LLM provider outputs the raw tokens. **Async support** Any chain built with LCEL can be called both with the synchronous API (eg. in your Jupyter notebook while prototyping) as well as with the asynchronous API (eg. in a [LangServe](/v0.2/docs/langserve/) server). This enables using the same code for prototypes and in production, with great performance, and the ability to handle many concurrent requests in the same server. **Optimized parallel execution** Whenever your LCEL chains have steps that can be executed in parallel (eg if you fetch documents from multiple retrievers) we automatically do it, both in the sync and the async interfaces, for the smallest possible latency. **Retries and fallbacks** Configure retries and fallbacks for any part of your LCEL chain. This is a great way to make your chains more reliable at scale. We’re currently working on adding streaming support for retries/fallbacks, so you can get the added reliability without any latency cost. **Access intermediate results** For more complex chains it’s often very useful to access the results of intermediate steps even before the final output is produced. This can be used to let end-users know something is happening, or even just to debug your chain. You can stream intermediate results, and it’s available on every [LangServe](/v0.2/docs/langserve/) server. **Input and output schemas** Input and output schemas give every LCEL chain Pydantic and JSONSchema schemas inferred from the structure of your chain. This can be used for validation of inputs and outputs, and is an integral part of LangServe. [**Seamless LangSmith tracing**](https://docs.smith.langchain.com) As your chains get more and more complex, it becomes increasingly important to understand what exactly is happening at every step. With LCEL, **all** steps are automatically logged to [LangSmith](https://docs.smith.langchain.com/) for maximum observability and debuggability. [**Seamless LangServe deployment**](/v0.2/docs/langserve/) Any chain created with LCEL can be easily deployed using [LangServe](/v0.2/docs/langserve/). ### Runnable interface[​](#runnable-interface "Direct link to Runnable interface") To make it as easy as possible to create custom chains, we've implemented a ["Runnable"](https://api.python.langchain.com/en/stable/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) protocol. Many LangChain components implement the `Runnable` protocol, including chat models, LLMs, output parsers, retrievers, prompt templates, and more. There are also several useful primitives for working with runnables, which you can read about below. This is a standard interface, which makes it easy to define custom chains as well as invoke them in a standard way. 
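To make the Runnable protocol concrete, here is a minimal sketch of a custom chain built from plain Python functions; the function bodies are arbitrary examples, not part of the LangChain API.

```python
from langchain_core.runnables import RunnableLambda

# Wrapping ordinary functions as Runnables gives them the standard interface
# and lets them be composed with `|`.
shout = RunnableLambda(lambda text: text.upper())
exclaim = RunnableLambda(lambda text: text + "!")

chain = shout | exclaim
print(chain.invoke("hello"))        # -> "HELLO!"
print(chain.batch(["hi", "hey"]))   # -> ["HI!", "HEY!"]
```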
The standard interface includes: * `stream`: stream back chunks of the response * `invoke`: call the chain on an input * `batch`: call the chain on a list of inputs These also have corresponding async methods that should be used with [asyncio](https://docs.python.org/3/library/asyncio.html) `await` syntax for concurrency: * `astream`: stream back chunks of the response async * `ainvoke`: call the chain on an input async * `abatch`: call the chain on a list of inputs async * `astream_log`: stream back intermediate steps as they happen, in addition to the final response * `astream_events`: **beta** stream events as they happen in the chain (introduced in `langchain-core` 0.1.14) The **input type** and **output type** varies by component: Component Input Type Output Type Prompt Dictionary PromptValue ChatModel Single string, list of chat messages or a PromptValue ChatMessage LLM Single string, list of chat messages or a PromptValue String OutputParser The output of an LLM or ChatModel Depends on the parser Retriever Single string List of Documents Tool Single string or dictionary, depending on the tool Depends on the tool All runnables expose input and output **schemas** to inspect the inputs and outputs: * `input_schema`: an input Pydantic model auto-generated from the structure of the Runnable * `output_schema`: an output Pydantic model auto-generated from the structure of the Runnable Components[​](#components "Direct link to Components") ------------------------------------------------------ LangChain provides standard, extendable interfaces and external integrations for various components useful for building with LLMs. Some components LangChain implements, some components we rely on third-party integrations for, and others are a mix. ### Chat models[​](#chat-models "Direct link to Chat models") Language models that use a sequence of messages as inputs and return chat messages as outputs (as opposed to using plain text). These are traditionally newer models (older models are generally `LLMs`, see below). Chat models support the assignment of distinct roles to conversation messages, helping to distinguish messages from the AI, users, and instructions such as system messages. Although the underlying models are messages in, message out, the LangChain wrappers also allow these models to take a string as input. This means you can easily use chat models in place of LLMs. When a string is passed in as input, it is converted to a `HumanMessage` and then passed to the underlying model. LangChain does not host any Chat Models, rather we rely on third party integrations. We have some standardized parameters when constructing ChatModels: * `model`: the name of the model * `temperature`: the sampling temperature * `timeout`: request timeout * `max_tokens`: max tokens to generate * `stop`: default stop sequences * `max_retries`: max number of times to retry requests * `api_key`: API key for the model provider * `base_url`: endpoint to send requests to Some important things to note: * standard params only apply to model providers that expose parameters with the intended functionality. For example, some providers do not expose a configuration for maximum output tokens, so max\_tokens can't be supported on these. * standard params are currently only enforced on integrations that have their own integration packages (e.g. `langchain-openai`, `langchain-anthropic`, etc.), they're not enforced on models in `langchain-community`. ChatModels also accept other parameters that are specific to that integration. 
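For illustration, here is a chat model constructed with several of the standardized parameters listed above. This is a sketch that assumes the `langchain-openai` partner package and an `OPENAI_API_KEY` in the environment; the parameter values are arbitrary.

```python
from langchain_openai import ChatOpenAI

model = ChatOpenAI(
    model="gpt-4o",    # model name
    temperature=0,     # sampling temperature
    max_tokens=256,    # cap on generated tokens
    timeout=30,        # request timeout in seconds
    max_retries=2,     # retry failed requests
)
print(model.invoke("Name one LangChain package.").content)
```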
To find all the parameters supported by a ChatModel head to the API reference for that model. info **Tool Calling** Some chat models have been fine-tuned for tool calling and provide a dedicated API for tool calling. Generally, such models are better at tool calling than non-fine-tuned models, and are recommended for use cases that require tool calling. Please see the [tool calling section](/v0.2/docs/concepts/#functiontool-calling) for more information. For specifics on how to use chat models, see the [relevant how-to guides here](/v0.2/docs/how_to/#chat-models). #### Multimodality[​](#multimodality "Direct link to Multimodality") Some chat models are multimodal, accepting images, audio and even video as inputs. These are still less common, meaning model providers haven't standardized on the "best" way to define the API. Multimodal **outputs** are even less common. As such, we've kept our multimodal abstractions fairly light weight and plan to further solidify the multimodal APIs and interaction patterns as the field matures. In LangChain, most chat models that support multimodal inputs also accept those values in OpenAI's content blocks format. So far this is restricted to image inputs. For models like Gemini which support video and other bytes input, the APIs also support the native, model-specific representations. For specifics on how to use multimodal models, see the [relevant how-to guides here](/v0.2/docs/how_to/#multimodal). For a full list of LangChain model providers with multimodal models, [check out this table](/v0.2/docs/integrations/chat/#advanced-features). ### LLMs[​](#llms "Direct link to LLMs") caution Pure text-in/text-out LLMs tend to be older or lower-level. Many popular models are best used as [chat completion models](/v0.2/docs/concepts/#chat-models), even for non-chat use cases. You are probably looking for [the section above instead](/v0.2/docs/concepts/#chat-models). Language models that takes a string as input and returns a string. These are traditionally older models (newer models generally are [Chat Models](/v0.2/docs/concepts/#chat-models), see above). Although the underlying models are string in, string out, the LangChain wrappers also allow these models to take messages as input. This gives them the same interface as [Chat Models](/v0.2/docs/concepts/#chat-models). When messages are passed in as input, they will be formatted into a string under the hood before being passed to the underlying model. LangChain does not host any LLMs, rather we rely on third party integrations. For specifics on how to use LLMs, see the [relevant how-to guides here](/v0.2/docs/how_to/#llms). ### Messages[​](#messages "Direct link to Messages") Some language models take a list of messages as input and return a message. There are a few different types of messages. All messages have a `role`, `content`, and `response_metadata` property. The `role` describes WHO is saying the message. LangChain has different message classes for different roles. The `content` property describes the content of the message. This can be a few different things: * A string (most models deal this type of content) * A List of dictionaries (this is used for multimodal input, where the dictionary contains information about that input type and that input location) #### HumanMessage[​](#humanmessage "Direct link to HumanMessage") This represents a message from the user. #### AIMessage[​](#aimessage "Direct link to AIMessage") This represents a message from the model. 
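The message classes above can be constructed directly and passed to any chat model. A minimal sketch (again assuming `langchain-openai`, though every provider integration returns the same `AIMessage` type):

```python
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-4o")
ai_msg = model.invoke([
    SystemMessage(content="You are a terse assistant."),
    HumanMessage(content="Say hello."),
])

print(type(ai_msg).__name__)     # AIMessage
print(ai_msg.content)            # the reply text
print(ai_msg.response_metadata)  # provider-specific details such as token usage
```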
In addition to the `content` property, these messages also have: **`response_metadata`** The `response_metadata` property contains additional metadata about the response. The data here is often specific to each model provider. This is where information like log-probs and token usage may be stored. **`tool_calls`** These represent a decision from an language model to call a tool. They are included as part of an `AIMessage` output. They can be accessed from there with the `.tool_calls` property. This property returns a list of dictionaries. Each dictionary has the following keys: * `name`: The name of the tool that should be called. * `args`: The arguments to that tool. * `id`: The id of that tool call. #### SystemMessage[​](#systemmessage "Direct link to SystemMessage") This represents a system message, which tells the model how to behave. Not every model provider supports this. #### FunctionMessage[​](#functionmessage "Direct link to FunctionMessage") This represents the result of a function call. In addition to `role` and `content`, this message has a `name` parameter which conveys the name of the function that was called to produce this result. #### ToolMessage[​](#toolmessage "Direct link to ToolMessage") This represents the result of a tool call. This is distinct from a FunctionMessage in order to match OpenAI's `function` and `tool` message types. In addition to `role` and `content`, this message has a `tool_call_id` parameter which conveys the id of the call to the tool that was called to produce this result. ### Prompt templates[​](#prompt-templates "Direct link to Prompt templates") Prompt templates help to translate user input and parameters into instructions for a language model. This can be used to guide a model's response, helping it understand the context and generate relevant and coherent language-based output. Prompt Templates take as input a dictionary, where each key represents a variable in the prompt template to fill in. Prompt Templates output a PromptValue. This PromptValue can be passed to an LLM or a ChatModel, and can also be cast to a string or a list of messages. The reason this PromptValue exists is to make it easy to switch between strings and messages. There are a few different types of prompt templates: #### String PromptTemplates[​](#string-prompttemplates "Direct link to String PromptTemplates") These prompt templates are used to format a single string, and generally are used for simpler inputs. For example, a common way to construct and use a PromptTemplate is as follows: from langchain_core.prompts import PromptTemplateprompt_template = PromptTemplate.from_template("Tell me a joke about {topic}")prompt_template.invoke({"topic": "cats"}) **API Reference:**[PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) #### ChatPromptTemplates[​](#chatprompttemplates "Direct link to ChatPromptTemplates") These prompt templates are used to format a list of messages. These "templates" consist of a list of templates themselves. 
For example, a common way to construct and use a ChatPromptTemplate is as follows: from langchain_core.prompts import ChatPromptTemplateprompt_template = ChatPromptTemplate.from_messages([ ("system", "You are a helpful assistant"), ("user", "Tell me a joke about {topic}")])prompt_template.invoke({"topic": "cats"}) **API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) In the above example, this ChatPromptTemplate will construct two messages when called. The first is a system message, that has no variables to format. The second is a HumanMessage, and will be formatted by the `topic` variable the user passes in. #### MessagesPlaceholder[​](#messagesplaceholder "Direct link to MessagesPlaceholder") This prompt template is responsible for adding a list of messages in a particular place. In the above ChatPromptTemplate, we saw how we could format two messages, each one a string. But what if we wanted the user to pass in a list of messages that we would slot into a particular spot? This is how you use MessagesPlaceholder. from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholderfrom langchain_core.messages import HumanMessageprompt_template = ChatPromptTemplate.from_messages([ ("system", "You are a helpful assistant"), MessagesPlaceholder("msgs")])prompt_template.invoke({"msgs": [HumanMessage(content="hi!")]}) **API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [MessagesPlaceholder](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.MessagesPlaceholder.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) This will produce a list of two messages, the first one being a system message, and the second one being the HumanMessage we passed in. If we had passed in 5 messages, then it would have produced 6 messages in total (the system message plus the 5 passed in). This is useful for letting a list of messages be slotted into a particular spot. An alternative way to accomplish the same thing without using the `MessagesPlaceholder` class explicitly is: prompt_template = ChatPromptTemplate.from_messages([ ("system", "You are a helpful assistant"), ("placeholder", "{msgs}") # <-- This is the changed part]) For specifics on how to use prompt templates, see the [relevant how-to guides here](/v0.2/docs/how_to/#prompt-templates). ### Example selectors[​](#example-selectors "Direct link to Example selectors") One common prompting technique for achieving better performance is to include examples as part of the prompt. This gives the language model concrete examples of how it should behave. Sometimes these examples are hardcoded into the prompt, but for more advanced situations it may be nice to dynamically select them. Example Selectors are classes responsible for selecting and then formatting examples into prompts. For specifics on how to use example selectors, see the [relevant how-to guides here](/v0.2/docs/how_to/#example-selectors). ### Output parsers[​](#output-parsers "Direct link to Output parsers") note The information here refers to parsers that take a text output from a model try to parse it into a more structured representation. More and more models are supporting function (or tool) calling, which handles this automatically. It is recommended to use function/tool calling rather than output parsing. 
See documentation for that [here](/v0.2/docs/concepts/#function-tool-calling). Responsible for taking the output of a model and transforming it to a more suitable format for downstream tasks. Useful when you are using LLMs to generate structured data, or to normalize output from chat models and LLMs. LangChain has lots of different types of output parsers. This is a list of output parsers LangChain supports. The table below has various pieces of information: **Name**: The name of the output parser **Supports Streaming**: Whether the output parser supports streaming. **Has Format Instructions**: Whether the output parser has format instructions. This is generally available except when (a) the desired schema is not specified in the prompt but rather in other parameters (like OpenAI function calling), or (b) when the OutputParser wraps another OutputParser. **Calls LLM**: Whether this output parser itself calls an LLM. This is usually only done by output parsers that attempt to correct misformatted output. **Input Type**: Expected input type. Most output parsers work on both strings and messages, but some (like OpenAI Functions) need a message with specific kwargs. **Output Type**: The output type of the object returned by the parser. **Description**: Our commentary on this output parser and when to use it. Name Supports Streaming Has Format Instructions Calls LLM Input Type Output Type Description [JSON](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.json.JsonOutputParser.html#langchain_core.output_parsers.json.JsonOutputParser) ✅ ✅ `str` | `Message` JSON object Returns a JSON object as specified. You can specify a Pydantic model and it will return JSON for that model. Probably the most reliable output parser for getting structured data that does NOT use function calling. [XML](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.xml.XMLOutputParser.html#langchain_core.output_parsers.xml.XMLOutputParser) ✅ ✅ `str` | `Message` `dict` Returns a dictionary of tags. Use when XML output is needed. Use with models that are good at writing XML (like Anthropic's). [CSV](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.list.CommaSeparatedListOutputParser.html#langchain_core.output_parsers.list.CommaSeparatedListOutputParser) ✅ ✅ `str` | `Message` `List[str]` Returns a list of comma separated values. [OutputFixing](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.fix.OutputFixingParser.html#langchain.output_parsers.fix.OutputFixingParser) ✅ `str` | `Message` Wraps another output parser. If that output parser errors, then this will pass the error message and the bad output to an LLM and ask it to fix the output. [RetryWithError](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.retry.RetryWithErrorOutputParser.html#langchain.output_parsers.retry.RetryWithErrorOutputParser) ✅ `str` | `Message` Wraps another output parser. If that output parser errors, then this will pass the original inputs, the bad output, and the error message to an LLM and ask it to fix it. Compared to OutputFixingParser, this one also sends the original instructions. 
[Pydantic](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.pydantic.PydanticOutputParser.html#langchain_core.output_parsers.pydantic.PydanticOutputParser) ✅ `str` | `Message` `pydantic.BaseModel` Takes a user defined Pydantic model and returns data in that format. [YAML](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.yaml.YamlOutputParser.html#langchain.output_parsers.yaml.YamlOutputParser) ✅ `str` | `Message` `pydantic.BaseModel` Takes a user defined Pydantic model and returns data in that format. Uses YAML to encode it. [PandasDataFrame](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.pandas_dataframe.PandasDataFrameOutputParser.html#langchain.output_parsers.pandas_dataframe.PandasDataFrameOutputParser) ✅ `str` | `Message` `dict` Useful for doing operations with pandas DataFrames. [Enum](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.enum.EnumOutputParser.html#langchain.output_parsers.enum.EnumOutputParser) ✅ `str` | `Message` `Enum` Parses response into one of the provided enum values. [Datetime](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.datetime.DatetimeOutputParser.html#langchain.output_parsers.datetime.DatetimeOutputParser) ✅ `str` | `Message` `datetime.datetime` Parses response into a datetime string. [Structured](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.structured.StructuredOutputParser.html#langchain.output_parsers.structured.StructuredOutputParser) ✅ `str` | `Message` `Dict[str, str]` An output parser that returns structured information. It is less powerful than other output parsers since it only allows for fields to be strings. This can be useful when you are working with smaller LLMs. For specifics on how to use output parsers, see the [relevant how-to guides here](/v0.2/docs/how_to/#output-parsers). ### Chat history[​](#chat-history "Direct link to Chat history") Most LLM applications have a conversational interface. An essential component of a conversation is being able to refer to information introduced earlier in the conversation. At bare minimum, a conversational system should be able to access some window of past messages directly. The concept of `ChatHistory` refers to a class in LangChain which can be used to wrap an arbitrary chain. This `ChatHistory` will keep track of inputs and outputs of the underlying chain, and append them as messages to a message database. Future interactions will then load those messages and pass them into the chain as part of the input. ### Documents[​](#documents "Direct link to Documents") A Document object in LangChain contains information about some data. It has two attributes: * `page_content: str`: The content of this document. Currently is only a string. * `metadata: dict`: Arbitrary metadata associated with this document. Can track the document id, file name, etc. ### Document loaders[​](#document-loaders "Direct link to Document loaders") These classes load Document objects. LangChain has hundreds of integrations with various data sources to load data from: Slack, Notion, Google Drive, etc. Each DocumentLoader has its own specific parameters, but they can all be invoked in the same way with the `.load` method. An example use case is as follows: from langchain_community.document_loaders.csv_loader import CSVLoaderloader = CSVLoader( ... 
# <-- Integration specific parameters here)data = loader.load() **API Reference:**[CSVLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.csv_loader.CSVLoader.html) For specifics on how to use document loaders, see the [relevant how-to guides here](/v0.2/docs/how_to/#document-loaders). ### Text splitters[​](#text-splitters "Direct link to Text splitters") Once you've loaded documents, you'll often want to transform them to better suit your application. The simplest example is you may want to split a long document into smaller chunks that can fit into your model's context window. LangChain has a number of built-in document transformers that make it easy to split, combine, filter, and otherwise manipulate documents. When you want to deal with long pieces of text, it is necessary to split up that text into chunks. As simple as this sounds, there is a lot of potential complexity here. Ideally, you want to keep the semantically related pieces of text together. What "semantically related" means could depend on the type of text. This notebook showcases several ways to do that. At a high level, text splitters work as following: 1. Split the text up into small, semantically meaningful chunks (often sentences). 2. Start combining these small chunks into a larger chunk until you reach a certain size (as measured by some function). 3. Once you reach that size, make that chunk its own piece of text and then start creating a new chunk of text with some overlap (to keep context between chunks). That means there are two different axes along which you can customize your text splitter: 1. How the text is split 2. How the chunk size is measured For specifics on how to use text splitters, see the [relevant how-to guides here](/v0.2/docs/how_to/#text-splitters). ### Embedding models[​](#embedding-models "Direct link to Embedding models") Embedding models create a vector representation of a piece of text. You can think of a vector as an array of numbers that captures the semantic meaning of the text. By representing the text in this way, you can perform mathematical operations that allow you to do things like search for other pieces of text that are most similar in meaning. These natural language search capabilities underpin many types of [context retrieval](/v0.2/docs/concepts/#retrieval), where we provide an LLM with the relevant data it needs to effectively respond to a query. ![](/v0.2/assets/images/embeddings-9c2616450a3b4f497a2d95a696b5f1a7.png) The `Embeddings` class is a class designed for interfacing with text embedding models. There are many different embedding model providers (OpenAI, Cohere, Hugging Face, etc) and local models, and this class is designed to provide a standard interface for all of them. The base Embeddings class in LangChain provides two methods: one for embedding documents and one for embedding a query. The former takes as input multiple texts, while the latter takes a single text. The reason for having these as two separate methods is that some embedding providers have different embedding methods for documents (to be searched over) vs queries (the search query itself). For specifics on how to use embedding models, see the [relevant how-to guides here](/v0.2/docs/how_to/#embedding-models). 
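As a quick sketch of the two methods just described, using the `langchain-openai` integration (any embeddings provider exposes the same interface; the model name is an assumption):

```python
from langchain_openai import OpenAIEmbeddings

embeddings = OpenAIEmbeddings(model="text-embedding-3-small")

# Embed the texts that will be searched over ...
doc_vectors = embeddings.embed_documents([
    "LangChain splits its code into several packages.",
    "LCEL composes Runnables into chains.",
])
# ... and embed the search query itself.
query_vector = embeddings.embed_query("How is LangChain organized?")

print(len(doc_vectors), len(query_vector))  # number of documents, vector dimension
```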
### Vector stores[​](#vector-stores "Direct link to Vector stores") One of the most common ways to store and search over unstructured data is to embed it and store the resulting embedding vectors, and then at query time to embed the unstructured query and retrieve the embedding vectors that are 'most similar' to the embedded query. A vector store takes care of storing embedded data and performing vector search for you. Most vector stores can also store metadata about embedded vectors and support filtering on that metadata before similarity search, allowing you more control over returned documents. Vector stores can be converted to the retriever interface by doing: vectorstore = MyVectorStore()retriever = vectorstore.as_retriever() For specifics on how to use vector stores, see the [relevant how-to guides here](/v0.2/docs/how_to/#vector-stores). ### Retrievers[​](#retrievers "Direct link to Retrievers") A retriever is an interface that returns documents given an unstructured query. It is more general than a vector store. A retriever does not need to be able to store documents, only to return (or retrieve) them. Retrievers can be created from vector stores, but are also broad enough to include [Wikipedia search](/v0.2/docs/integrations/retrievers/wikipedia/) and [Amazon Kendra](/v0.2/docs/integrations/retrievers/amazon_kendra_retriever/). Retrievers accept a string query as input and return a list of Document's as output. For specifics on how to use retrievers, see the [relevant how-to guides here](/v0.2/docs/how_to/#retrievers). ### Tools[​](#tools "Direct link to Tools") Tools are interfaces that an agent, a chain, or a chat model / LLM can use to interact with the world. A tool consists of the following components: 1. The name of the tool 2. A description of what the tool does 3. JSON schema of what the inputs to the tool are 4. The function to call 5. Whether the result of a tool should be returned directly to the user (only relevant for agents) The name, description and JSON schema are provided as context to the LLM, allowing the LLM to determine how to use the tool appropriately. Given a list of available tools and a prompt, an LLM can request that one or more tools be invoked with appropriate arguments. Generally, when designing tools to be used by a chat model or LLM, it is important to keep in mind the following: * Chat models that have been fine-tuned for tool calling will be better at tool calling than non-fine-tuned models. * Non fine-tuned models may not be able to use tools at all, especially if the tools are complex or require multiple tool calls. * Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas. * Simpler tools are generally easier for models to use than more complex tools. For specifics on how to use tools, see the [relevant how-to guides here](/v0.2/docs/how_to/#tools). ### Toolkits[​](#toolkits "Direct link to Toolkits") Toolkits are collections of tools that are designed to be used together for specific tasks. They have convenient loading methods. All Toolkits expose a `get_tools` method which returns a list of tools. You can therefore do: # Initialize a toolkittoolkit = ExampleTookit(...)# Get list of toolstools = toolkit.get_tools() ### Agents[​](#agents "Direct link to Agents") By themselves, language models can't take actions - they just output text. A big use case for LangChain is creating **agents**. 
Agents are systems that use an LLM as a reasoning engine to determine which actions to take and what the inputs to those actions should be. The results of those actions can then be fed back into the agent, and it can determine whether more actions are needed, or whether it is okay to finish.

[LangGraph](https://github.com/langchain-ai/langgraph) is an extension of LangChain specifically aimed at creating highly controllable and customizable agents. Please check out that documentation for a more in-depth overview of agent concepts.

There is a legacy agent concept in LangChain that we are moving towards deprecating: `AgentExecutor`. AgentExecutor was essentially a runtime for agents. It was a great place to get started, however, it was not flexible enough as you started to have more customized agents. In order to solve that we built LangGraph to be this flexible, highly-controllable runtime.

If you are still using AgentExecutor, do not fear: we still have a guide on [how to use AgentExecutor](/v0.2/docs/how_to/agent_executor/). It is recommended, however, that you start to transition to LangGraph. In order to assist in this, we have put together a [transition guide on how to do so](/v0.2/docs/how_to/migrate_agent/).

### Callbacks[​](#callbacks "Direct link to Callbacks")

LangChain provides a callbacks system that allows you to hook into the various stages of your LLM application. This is useful for logging, monitoring, streaming, and other tasks. You can subscribe to these events by using the `callbacks` argument available throughout the API. This argument is a list of handler objects, which are expected to implement one or more of the methods described below in more detail.

#### Callback Events[​](#callback-events "Direct link to Callback Events")

| Event | Event Trigger | Associated Method |
| --- | --- | --- |
| Chat model start | When a chat model starts | `on_chat_model_start` |
| LLM start | When an LLM starts | `on_llm_start` |
| LLM new token | When an LLM or chat model emits a new token | `on_llm_new_token` |
| LLM ends | When an LLM or chat model ends | `on_llm_end` |
| LLM errors | When an LLM or chat model errors | `on_llm_error` |
| Chain start | When a chain starts running | `on_chain_start` |
| Chain end | When a chain ends | `on_chain_end` |
| Chain error | When a chain errors | `on_chain_error` |
| Tool start | When a tool starts running | `on_tool_start` |
| Tool end | When a tool ends | `on_tool_end` |
| Tool error | When a tool errors | `on_tool_error` |
| Agent action | When an agent takes an action | `on_agent_action` |
| Agent finish | When an agent ends | `on_agent_finish` |
| Retriever start | When a retriever starts | `on_retriever_start` |
| Retriever end | When a retriever ends | `on_retriever_end` |
| Retriever error | When a retriever errors | `on_retriever_error` |
| Text | When arbitrary text is run | `on_text` |
| Retry | When a retry event is run | `on_retry` |

#### Callback handlers[​](#callback-handlers "Direct link to Callback handlers")

Callback handlers can either be `sync` or `async`:

* Sync callback handlers implement the [BaseCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html) interface.
* Async callback handlers implement the [AsyncCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) interface.
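As an illustration of the sync interface, here is a minimal sketch of a handler that prints tokens as they stream; the class name is arbitrary.

```python
from langchain_core.callbacks import BaseCallbackHandler

class PrintTokenHandler(BaseCallbackHandler):
    """Print each token an LLM or chat model emits during streaming."""

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        print(token, end="", flush=True)

    def on_llm_end(self, response, **kwargs) -> None:
        print()  # finish the line when generation ends
```

A handler like this is passed in through the `callbacks` argument described next; note that `on_llm_new_token` generally only fires when the underlying model is called in streaming mode.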
During run-time LangChain configures an appropriate callback manager (e.g., [CallbackManager](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.CallbackManager.html) or [AsyncCallbackManager](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.AsyncCallbackManager.html)) which will be responsible for calling the appropriate method on each "registered" callback handler when the event is triggered.

#### Passing callbacks[​](#passing-callbacks "Direct link to Passing callbacks")

The `callbacks` property is available on most objects throughout the API (Models, Tools, Agents, etc.) in two different places:

* **Request time callbacks**: Passed at the time of the request in addition to the input data. Available on all standard `Runnable` objects. These callbacks are INHERITED by all children of the object they are defined on. For example, `chain.invoke({"number": 25}, {"callbacks": [handler]})`.
* **Constructor callbacks**: `chain = TheNameOfSomeChain(callbacks=[handler])`. These callbacks are passed as arguments to the constructor of the object. The callbacks are scoped only to the object they are defined on, and are **not** inherited by any children of the object.

danger Constructor callbacks are scoped only to the object they are defined on. They are **not** inherited by children of the object. If you're creating a custom chain or runnable, you need to remember to propagate request time callbacks to any child objects.

Async in Python<=3.10 Any `RunnableLambda`, `RunnableGenerator`, or `Tool` that invokes other runnables and is running async in Python<=3.10 will have to propagate callbacks to child objects manually. This is because LangChain cannot automatically propagate callbacks to child objects in this case. This is a common reason why you may fail to see events being emitted from custom runnables or tools.

For specifics on how to use callbacks, see the [relevant how-to guides here](/v0.2/docs/how_to/#callbacks).

Techniques[​](#techniques "Direct link to Techniques")
------------------------------------------------------

### Streaming[​](#streaming "Direct link to Streaming")

Individual LLM calls often run for much longer than traditional resource requests. This compounds when you build more complex chains or agents that require multiple reasoning steps. Fortunately, LLMs generate output iteratively, which means it's possible to show sensible intermediate results before the final response is ready. Consuming output as soon as it becomes available has therefore become a vital part of the UX around building apps with LLMs to help alleviate latency issues, and LangChain aims to have first-class support for streaming.

Below, we'll discuss some concepts and considerations around streaming in LangChain.

#### `.stream()` and `.astream()`[​](#stream-and-astream "Direct link to stream-and-astream")

Most modules in LangChain include the `.stream()` method (and the equivalent `.astream()` method for [async](https://docs.python.org/3/library/asyncio.html) environments) as an ergonomic streaming interface. `.stream()` returns an iterator, which you can consume with a simple `for` loop.
Here's an example with a chat model: from langchain_anthropic import ChatAnthropicmodel = ChatAnthropic(model="claude-3-sonnet-20240229")for chunk in model.stream("what color is the sky?"): print(chunk.content, end="|", flush=True) **API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) For models (or other components) that don't support streaming natively, this iterator would just yield a single chunk, but you could still use the same general pattern when calling them. Using `.stream()` will also automatically call the model in streaming mode without the need to provide additional config. The type of each outputted chunk depends on the type of component - for example, chat models yield [`AIMessageChunks`](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html). Because this method is part of [LangChain Expression Language](/v0.2/docs/concepts/#langchain-expression-language-lcel), you can handle formatting differences from different outputs using an [output parser](/v0.2/docs/concepts/#output-parsers) to transform each yielded chunk. You can check out [this guide](/v0.2/docs/how_to/streaming/#using-stream) for more detail on how to use `.stream()`. #### `.astream_events()`[​](#astream_events "Direct link to astream_events") While the `.stream()` method is intuitive, it can only return the final generated value of your chain. This is fine for single LLM calls, but as you build more complex chains of several LLM calls together, you may want to use the intermediate values of the chain alongside the final output - for example, returning sources alongside the final generation when building a chat over documents app. There are ways to do this [using callbacks](/v0.2/docs/concepts/#callbacks-1), or by constructing your chain in such a way that it passes intermediate values to the end with something like chained [`.assign()`](/v0.2/docs/how_to/passthrough/) calls, but LangChain also includes an `.astream_events()` method that combines the flexibility of callbacks with the ergonomics of `.stream()`. When called, it returns an iterator which yields [various types of events](/v0.2/docs/how_to/streaming/#event-reference) that you can filter and process according to the needs of your project. Here's one small example that prints just events containing streamed chat model output: from langchain_core.output_parsers import StrOutputParserfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_anthropic import ChatAnthropicmodel = ChatAnthropic(model="claude-3-sonnet-20240229")prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")parser = StrOutputParser()chain = prompt | model | parserasync for event in chain.astream_events({"topic": "parrot"}, version="v2"): kind = event["event"] if kind == "on_chat_model_stream": print(event, end="|", flush=True) **API Reference:**[StrOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.string.StrOutputParser.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) You can roughly think of it as an iterator over callback events (though the format differs) - and you can use it on almost all LangChain components! 
See [this guide](/v0.2/docs/how_to/streaming/#using-stream-events) for more detailed information on how to use `.astream_events()`, including a table listing available events.

#### Callbacks[​](#callbacks-1 "Direct link to Callbacks")

The lowest level way to stream outputs from LLMs in LangChain is via the [callbacks](/v0.2/docs/concepts/#callbacks) system. You can pass a callback handler that handles the [`on_llm_new_token`](https://api.python.langchain.com/en/latest/callbacks/langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.html#langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.on_llm_new_token) event into LangChain components. When that component is invoked, any [LLM](/v0.2/docs/concepts/#llms) or [chat model](/v0.2/docs/concepts/#chat-models) contained in the component calls the callback with the generated token. Within the callback, you could pipe the tokens into some other destination, e.g. an HTTP response. You can also handle the [`on_llm_end`](https://api.python.langchain.com/en/latest/callbacks/langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.html#langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.on_llm_end) event to perform any necessary cleanup. You can see [this how-to section](/v0.2/docs/how_to/#callbacks) for more specifics on using callbacks.

Callbacks were the first technique for streaming introduced in LangChain. While powerful and generalizable, they can be unwieldy for developers. For example:

* You need to explicitly initialize and manage some aggregator or other stream to collect results.
* The execution order isn't explicitly guaranteed, and you could theoretically have a callback run after the `.invoke()` method finishes.
* Providers would often make you pass an additional parameter to stream outputs instead of returning them all at once.
* You would often ignore the result of the actual model call in favor of callback results.

#### Tokens[​](#tokens "Direct link to Tokens")

Most model providers measure input and output in units called **tokens**. Tokens are the basic units that language models read and generate when processing or producing text. The exact definition of a token can vary depending on the specific way the model was trained - for instance, in English, a token could be a single word like "apple", or a part of a word like "app". When you send a model a prompt, the words and characters in the prompt are encoded into tokens using a **tokenizer**. The model then streams back generated output tokens, which the tokenizer decodes into human-readable text. The below example shows how OpenAI models tokenize `LangChain is cool!`: ![](/v0.2/assets/images/tokenization-10f566ab6774724e63dd99646f69655c.png) You can see that it gets split into 5 different tokens, and that the boundaries between tokens are not exactly the same as word boundaries.

The reason language models use tokens rather than something more immediately intuitive like "characters" has to do with how they process and understand text. At a high level, language models iteratively predict their next generated output based on the initial input and their previous generations. Training the model on tokens allows language models to handle linguistic units (like words or subwords) that carry meaning, rather than individual characters, which makes it easier for the model to learn and understand the structure of the language, including grammar and context.
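If you want to reproduce the tokenization above yourself, here is a small sketch using OpenAI's `tiktoken` library; it is not a LangChain dependency, and the exact split can vary by model and encoding.

```python
import tiktoken

enc = tiktoken.encoding_for_model("gpt-4")
token_ids = enc.encode("LangChain is cool!")

print(len(token_ids))                        # 5 tokens for this sentence and encoding
print([enc.decode([t]) for t in token_ids])  # roughly: ['Lang', 'Chain', ' is', ' cool', '!']
```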
Furthermore, using tokens can also improve efficiency, since the model processes fewer units of text compared to character-level processing. ### Structured output[​](#structured-output "Direct link to Structured output") LLMs are capable of generating arbitrary text. This enables the model to respond appropriately to a wide range of inputs, but for some use-cases, it can be useful to constrain the LLM's output to a specific format or structure. This is referred to as **structured output**. For example, if the output is to be stored in a relational database, it is much easier if the model generates output that adheres to a defined schema or format. [Extracting specific information](/v0.2/docs/tutorials/extraction/) from unstructured text is another case where this is particularly useful. Most commonly, the output format will be JSON, though other formats such as [YAML](/v0.2/docs/how_to/output_parser_yaml/) can be useful too. Below, we'll discuss a few ways to get structured output from models in LangChain. #### `.with_structured_output()`[​](#with_structured_output "Direct link to with_structured_output") For convenience, some LangChain chat models support a `.with_structured_output()` method. This method only requires a schema as input, and returns a dict or Pydantic object. Generally, this method is only present on models that support one of the more advanced methods described below, and will use one of them under the hood. It takes care of importing a suitable output parser and formatting the schema in the right format for the model. For more information, check out this [how-to guide](/v0.2/docs/how_to/structured_output/#the-with_structured_output-method). #### Raw prompting[​](#raw-prompting "Direct link to Raw prompting") The most intuitive way to get a model to structure output is to ask nicely. In addition to your query, you can give instructions describing what kind of output you'd like, then parse the output using an [output parser](/v0.2/docs/concepts/#output-parsers) to convert the raw model message or string output into something more easily manipulated. The biggest benefit to raw prompting is its flexibility: * Raw prompting does not require any special model features, only sufficient reasoning capability to understand the passed schema. * You can prompt for any format you'd like, not just JSON. This can be useful if the model you are using is more heavily trained on a certain type of data, such as XML or YAML. However, there are some drawbacks too: * LLMs are non-deterministic, and prompting a LLM to consistently output data in the exactly correct format for smooth parsing can be surprisingly difficult and model-specific. * Individual models have quirks depending on the data they were trained on, and optimizing prompts can be quite difficult. Some may be better at interpreting [JSON schema](https://json-schema.org/), others may be best with TypeScript definitions, and still others may prefer XML. While we'll next go over some ways that you can take advantage of features offered by model providers to increase reliability, prompting techniques remain important for tuning your results no matter what method you choose. #### JSON mode[​](#json-mode "Direct link to JSON mode") Some models, such as [Mistral](/v0.2/docs/integrations/chat/mistralai/), [OpenAI](/v0.2/docs/integrations/chat/openai/), [Together AI](/v0.2/docs/integrations/chat/together/) and [Ollama](/v0.2/docs/integrations/chat/ollama/), support a feature called **JSON mode**, usually enabled via config. 
When enabled, JSON mode will constrain the model's output to always be some sort of valid JSON. Often they require some custom prompting, but it's usually much less burdensome and along the lines of, `"you must always return JSON"`, and the [output is easier to parse](/v0.2/docs/how_to/output_parser_json/). It's also generally simpler and more commonly available than tool calling. Here's an example: from langchain_core.prompts import ChatPromptTemplatefrom langchain_openai import ChatOpenAIfrom langchain.output_parsers.json import SimpleJsonOutputParsermodel = ChatOpenAI( model="gpt-4o", model_kwargs={ "response_format": { "type": "json_object" } },)prompt = ChatPromptTemplate.from_template( "Answer the user's question to the best of your ability." 'You must always output a JSON object with an "answer" key and a "followup_question" key.' "{question}")chain = prompt | model | SimpleJsonOutputParser()chain.invoke({ "question": "What is the powerhouse of the cell?" }) **API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) | [SimpleJsonOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.json.SimpleJsonOutputParser.html) {'answer': 'The powerhouse of the cell is the mitochondrion. It is responsible for producing energy in the form of ATP through cellular respiration.', 'followup_question': 'Would you like to know more about how mitochondria produce energy?'} For a full list of model providers that support JSON mode, see [this table](/v0.2/docs/integrations/chat/#advanced-features). #### Function/tool calling[​](#functiontool-calling "Direct link to Function/tool calling") info We use the term tool calling interchangeably with function calling. Although function calling is sometimes meant to refer to invocations of a single function, we treat all models as though they can return multiple tool or function calls in each message Tool calling allows a model to respond to a given prompt by generating output that matches a user-defined schema. While the name implies that the model is performing some action, this is actually not the case! The model is coming up with the arguments to a tool, and actually running the tool (or not) is up to the user - for example, if you want to [extract output matching some schema](/v0.2/docs/tutorials/extraction/) from unstructured text, you could give the model an "extraction" tool that takes parameters matching the desired schema, then treat the generated output as your final result. For models that support it, tool calling can be very convenient. It removes the guesswork around how best to prompt schemas in favor of a built-in model feature. It can also more naturally support agentic flows, since you can just pass multiple tool schemas instead of fiddling with enums or unions. Many LLM providers, including [Anthropic](https://www.anthropic.com/), [Cohere](https://cohere.com/), [Google](https://cloud.google.com/vertex-ai), [Mistral](https://mistral.ai/), [OpenAI](https://openai.com/), and others, support variants of a tool calling feature. These features typically allow requests to the LLM to include available tools and their schemas, and for responses to include calls to these tools. For instance, given a search engine tool, an LLM might handle a query by first issuing a call to the search engine. 
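A minimal sketch of that flow; the `search_web` tool, its stub body, and the model choice are illustrative assumptions rather than part of the documented example.

```python
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI

@tool
def search_web(query: str) -> str:
    """Search the web and return a short text result."""
    return "stub result"  # a real implementation would call a search API

model = ChatOpenAI(model="gpt-4o")
model_with_tools = model.bind_tools([search_web])

ai_msg = model_with_tools.invoke("Who won the 2023 Tour de France?")
print(ai_msg.tool_calls)
# e.g. [{'name': 'search_web', 'args': {'query': '2023 Tour de France winner'}, 'id': '...'}]
```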
LangChain includes a suite of [built-in tools](/v0.2/docs/integrations/tools/) and supports several methods for defining your own [custom tools](/v0.2/docs/how_to/custom_tools/). LangChain provides a standardized interface for tool calling that is consistent across different models. The standard interface consists of:

* `ChatModel.bind_tools()`: a method for specifying which tools are available for a model to call. This method accepts [LangChain tools](/v0.2/docs/concepts/#tools).
* `AIMessage.tool_calls`: an attribute on the `AIMessage` returned from the model for accessing the tool calls requested by the model.

The following how-to guides are good practical resources for using function/tool calling:

* [How to return structured data from an LLM](/v0.2/docs/how_to/structured_output/)
* [How to use a model to call tools](/v0.2/docs/how_to/tool_calling/)

For a full list of model providers that support tool calling, [see this table](/v0.2/docs/integrations/chat/#advanced-features).

### Retrieval

LLMs are trained on a large but fixed dataset, limiting their ability to reason over private or recent information. Fine-tuning an LLM with specific facts is one way to mitigate this, but it is often [poorly suited for factual recall](https://www.anyscale.com/blog/fine-tuning-is-for-form-not-facts) and [can be costly](https://www.glean.com/blog/how-to-build-an-ai-assistant-for-the-enterprise). Retrieval is the process of providing relevant information to an LLM to improve its response for a given input. Retrieval augmented generation (RAG) is the process of grounding the LLM generation (output) using the retrieved information.

Tip:

* See our RAG from Scratch [code](https://github.com/langchain-ai/rag-from-scratch) and [video series](https://youtube.com/playlist?list=PLfaIDFEXuae2LXbO1_PKyVJiQ23ZztA0x&feature=shared).
* For a high-level guide on retrieval, see this [tutorial on RAG](/v0.2/docs/tutorials/rag/).

RAG is only as good as the retrieved documents’ relevance and quality. Fortunately, an emerging set of techniques can be employed to design and improve RAG systems. We've focused on taxonomizing and summarizing many of these techniques (see the figure below) and will share some high-level strategic guidance in the following sections. You can and should experiment with using different pieces together. You might also find [this LangSmith guide](https://docs.smith.langchain.com/how_to_guides/evaluation/evaluate_llm_application) useful for showing how to evaluate different iterations of your app.

![](/v0.2/assets/images/rag_landscape-627f1d0fd46b92bc2db0af8f99ec3724.png)

#### Query Translation

First, consider the user input(s) to your RAG system. Ideally, a RAG system can handle a wide range of inputs, from poorly worded questions to complex multi-part queries. **Using an LLM to review and optionally modify the input is the central idea behind query translation.** This serves as a general buffer, optimizing raw user inputs for your retrieval system. For example, this can be as simple as extracting keywords or as complex as generating multiple sub-questions for a complex query.
| Name | When to use | Description |
| --- | --- | --- |
| [Multi-query](/v0.2/docs/how_to/MultiQueryRetriever/) | When you need to cover multiple perspectives of a question. | Rewrite the user question from multiple perspectives, retrieve documents for each rewritten question, and return the unique documents for all queries. |
| [Decomposition](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | When a question can be broken down into smaller subproblems. | Decompose a question into a set of subproblems / questions, which can either be solved sequentially (use the answer from the first + retrieval to answer the second) or in parallel (consolidate each answer into a final answer). |
| [Step-back](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | When a higher-level conceptual understanding is required. | First prompt the LLM to ask a generic step-back question about higher-level concepts or principles, and retrieve relevant facts about them. Use this grounding to help answer the user question. |
| [HyDE](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | If you have challenges retrieving relevant documents using the raw user inputs. | Use an LLM to convert questions into hypothetical documents that answer the question. Use the embedded hypothetical documents to retrieve real documents, with the premise that doc-doc similarity search can produce more relevant matches. |

Tip: See our RAG from Scratch videos for a few different specific approaches:

* [Multi-query](https://youtu.be/JChPi0CRnDY?feature=shared)
* [Decomposition](https://youtu.be/h0OPWlEOank?feature=shared)
* [Step-back](https://youtu.be/xn1jEjRyJ2U?feature=shared)
* [HyDE](https://youtu.be/SaDzIVkYqyY?feature=shared)

#### Routing

Second, consider the data sources available to your RAG system. You may want to query across more than one database, or across structured and unstructured data sources. **Using an LLM to review the input and route it to the appropriate data source is a simple and effective approach for querying across sources.**

| Name | When to use | Description |
| --- | --- | --- |
| [Logical routing](/v0.2/docs/how_to/routing/) | When you can prompt an LLM with rules to decide where to route the input. | Logical routing can use an LLM to reason about the query and choose which datastore is most appropriate. |
| [Semantic routing](/v0.2/docs/how_to/routing/#routing-by-semantic-similarity) | When semantic similarity is an effective way to determine where to route the input. | Semantic routing embeds both the query and, typically, a set of prompts. It then chooses the appropriate prompt based upon similarity. |

Tip: See our RAG from Scratch video on [routing](https://youtu.be/pfpIndq7Fi8?feature=shared).

#### Query Construction

Third, consider whether any of your data sources require specific query formats. Many structured databases use SQL. Vector stores often have specific syntax for applying keyword filters to document metadata. **Using an LLM to convert a natural language query into a query syntax is a popular and powerful approach.** In particular, [text-to-SQL](/v0.2/docs/tutorials/sql_qa/), [text-to-Cypher](/v0.2/docs/tutorials/graph/), and [query analysis for metadata filters](/v0.2/docs/tutorials/query_analysis/#query-analysis) are useful ways to interact with structured, graph, and vector databases respectively.

| Name | When to use | Description |
| --- | --- | --- |
| [Text to SQL](/v0.2/docs/tutorials/sql_qa/) | If users are asking questions that require information housed in a relational database, accessible via SQL. | This uses an LLM to transform user input into a SQL query. |
| [Text-to-Cypher](/v0.2/docs/tutorials/graph/) | If users are asking questions that require information housed in a graph database, accessible via Cypher. | This uses an LLM to transform user input into a Cypher query. |
| [Self Query](/v0.2/docs/how_to/self_query/) | If users are asking questions that are better answered by fetching documents based on metadata rather than similarity with the text. | This uses an LLM to transform user input into two things: (1) a string to look up semantically, (2) a metadata filter to go along with it. This is useful because oftentimes questions are about the METADATA of documents (not the content itself). |

Tip: See our [blog post overview](https://blog.langchain.dev/query-construction/) and RAG from Scratch video on [query construction](https://youtu.be/kl6NwWYxvbM?feature=shared), the process of text-to-DSL, where DSL is a domain specific language required to interact with a given database. This converts user questions into structured queries.

#### Indexing

Fourth, consider the design of your document index. A simple and powerful idea is to **decouple the documents that you index for retrieval from the documents that you pass to the LLM for generation.** Indexing frequently uses embedding models with vector stores, which [compress the semantic information in documents to fixed-size vectors](/v0.2/docs/concepts/#embedding-models).

Many RAG approaches focus on splitting documents into chunks and retrieving some number of them based on similarity to the input question for the LLM. But chunk size and chunk number can be difficult to set, and they affect results if they do not provide full context for the LLM to answer a question. Furthermore, LLMs are increasingly capable of processing millions of tokens.

Two approaches can address this tension: (1) the [Multi Vector](/v0.2/docs/how_to/multi_vector/) retriever uses an LLM to translate documents into any form (e.g., often a summary) that is well suited for indexing, but returns full documents to the LLM for generation. (2) The [ParentDocument](/v0.2/docs/how_to/parent_document_retriever/) retriever embeds document chunks, but also returns full documents. The idea is to get the best of both worlds: use concise representations (summaries or chunks) for retrieval, but use the full documents for answer generation.

| Name | Index Type | Uses an LLM | When to Use | Description |
| --- | --- | --- | --- | --- |
| [Vector store](/v0.2/docs/how_to/vectorstore_retriever/) | Vector store | No | If you are just getting started and looking for something quick and easy. | This is the simplest method and the one that is easiest to get started with. It involves creating embeddings for each piece of text. |
| [ParentDocument](/v0.2/docs/how_to/parent_document_retriever/) | Vector store + Document Store | No | If your pages have lots of smaller pieces of distinct information that are best indexed by themselves, but best retrieved all together. | This involves indexing multiple chunks for each document. Then you find the chunks that are most similar in embedding space, but you retrieve the whole parent document and return that (rather than individual chunks). |
| [Multi Vector](/v0.2/docs/how_to/multi_vector/) | Vector store + Document Store | Sometimes during indexing | If you are able to extract information from documents that you think is more relevant to index than the text itself. | This involves creating multiple vectors for each document. Each vector could be created in a myriad of ways - examples include summaries of the text and hypothetical questions. |
| [Time-Weighted Vector store](/v0.2/docs/how_to/time_weighted_vectorstore/) | Vector store | No | If you have timestamps associated with your documents, and you want to retrieve the most recent ones. | This fetches documents based on a combination of semantic similarity (as in normal vector retrieval) and recency (looking at timestamps of indexed documents). |

Tip:

* See our RAG from Scratch video on [indexing fundamentals](https://youtu.be/bjb_EMsTDKI?feature=shared)
* See our RAG from Scratch video on [multi vector retriever](https://youtu.be/gTCU9I6QqCE?feature=shared)
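As an illustration of the "index small, return big" idea above, here is a minimal sketch of the ParentDocument approach, assuming OpenAI embeddings and a Chroma vector store; the document contents and chunk size are placeholders:

```python
from langchain.retrievers import ParentDocumentRetriever
from langchain.storage import InMemoryStore
from langchain_chroma import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

# Small chunks are what get embedded and searched...
child_splitter = RecursiveCharacterTextSplitter(chunk_size=200)

vectorstore = Chroma(
    collection_name="full_documents", embedding_function=OpenAIEmbeddings()
)
docstore = InMemoryStore()  # ...but full parent documents are what get returned.

retriever = ParentDocumentRetriever(
    vectorstore=vectorstore,
    docstore=docstore,
    child_splitter=child_splitter,
)

docs = [Document(page_content="...a long report about quarterly revenue...")]
retriever.add_documents(docs)

# Returns the full parent documents whose chunks matched the query.
retriever.invoke("What happened to revenue last quarter?")
```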
Fifth, consider ways to improve the quality of your similarity search itself. Embedding models compress text into fixed-length (vector) representations that capture the semantic content of the document. This compression is useful for search / retrieval, but puts a heavy burden on that single vector representation to capture the semantic nuance / detail of the document. In some cases, irrelevant or redundant content can dilute the semantic usefulness of the embedding.

[ColBERT](https://docs.google.com/presentation/d/1IRhAdGjIevrrotdplHNcc4aXgIYyKamUKTWtB3m3aMU/edit?usp=sharing) is an interesting approach to address this with higher-granularity embeddings: (1) produce a contextually influenced embedding for each token in the document and query, (2) score the similarity between each query token and all document tokens, (3) take the max, (4) do this for all query tokens, and (5) take the sum of the max scores (from step 3) over all query tokens to get a query-document similarity score; this token-wise scoring can yield strong results.

![](/v0.2/assets/images/colbert-0bf5bd7485724d0005a2f5bdadbdaedb.png)

There are some additional tricks to improve the quality of your retrieval. Embeddings excel at capturing semantic information, but may struggle with keyword-based queries. Many [vector stores](/v0.2/docs/integrations/retrievers/pinecone_hybrid_search/) offer built-in [hybrid-search](https://docs.pinecone.io/guides/data/understanding-hybrid-search) to combine keyword and semantic similarity, which marries the benefits of both approaches. Furthermore, many vector stores have [maximal marginal relevance](https://python.langchain.com/v0.1/docs/modules/model_io/prompts/example_selectors/mmr/), which attempts to diversify the results of a search to avoid returning similar and redundant documents.

| Name | When to use | Description |
| --- | --- | --- |
| [ColBERT](/v0.2/docs/integrations/providers/ragatouille/#using-colbert-as-a-reranker) | When higher granularity embeddings are needed. | ColBERT uses contextually influenced embeddings for each token in the document and query to get a granular query-document similarity score. |
| [Hybrid search](/v0.2/docs/integrations/retrievers/pinecone_hybrid_search/) | When combining keyword-based and semantic similarity. | Hybrid search combines keyword and semantic similarity, marrying the benefits of both approaches. |
| [Maximal Marginal Relevance (MMR)](/v0.2/docs/integrations/vectorstores/pinecone/#maximal-marginal-relevance-searches) | When needing to diversify search results. | MMR attempts to diversify the results of a search to avoid returning similar and redundant documents. |

Tip: See our RAG from Scratch video on [ColBERT](https://youtu.be/cN6S0Ehm7_8?feature=shared).
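As one concrete example from the table above, many LangChain vector stores expose MMR through `as_retriever`. Here is a minimal sketch, assuming a Chroma store and OpenAI embeddings; the texts and parameters are illustrative only:

```python
from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings

vectorstore = Chroma.from_texts(
    ["doc about pandas", "another doc about pandas", "doc about llamas"],
    embedding=OpenAIEmbeddings(),
)

# MMR fetches a larger candidate pool (fetch_k), then picks k results that
# balance relevance to the query against diversity among the results.
retriever = vectorstore.as_retriever(
    search_type="mmr",
    search_kwargs={"k": 2, "fetch_k": 10},
)

retriever.invoke("tell me about pandas")
```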
#### Post-processing

Sixth, consider ways to filter or rank retrieved documents. This is very useful if you are [combining documents returned from multiple sources](/v0.2/docs/integrations/retrievers/cohere-reranker/#doing-reranking-with-coherererank), since it can down-rank less relevant documents and / or [compress similar documents](/v0.2/docs/how_to/contextual_compression/#more-built-in-compressors-filters).

| Name | Index Type | Uses an LLM | When to Use | Description |
| --- | --- | --- | --- | --- |
| [Contextual Compression](/v0.2/docs/how_to/contextual_compression/) | Any | Sometimes | If you are finding that your retrieved documents contain too much irrelevant information and are distracting the LLM. | This puts a post-processing step on top of another retriever and extracts only the most relevant information from retrieved documents. This can be done with embeddings or an LLM. |
| [Ensemble](/v0.2/docs/how_to/ensemble_retriever/) | Any | No | If you have multiple retrieval methods and want to try combining them. | This fetches documents from multiple retrievers and then combines them. |
| [Re-ranking](/v0.2/docs/integrations/retrievers/cohere-reranker/) | Any | Yes | If you want to rank retrieved documents based upon relevance, especially if you want to combine results from multiple retrieval methods. | Given a query and a list of documents, Rerank indexes the documents from most to least semantically relevant to the query. |

Tip: See our RAG from Scratch video on [RAG-Fusion](https://youtu.be/77qELPbNgxA?feature=shared), an approach for post-processing across multiple queries: rewrite the user question from multiple perspectives, retrieve documents for each rewritten question, and combine the ranks of multiple search result lists to produce a single, unified ranking with [Reciprocal Rank Fusion (RRF)](https://towardsdatascience.com/forget-rag-the-future-is-rag-fusion-1147298d8ad1).

#### Generation

**Finally, consider ways to build self-correction into your RAG system.** RAG systems can suffer from low quality retrieval (e.g., if a user question is out of the domain for the index) and / or hallucinations in generation. A naive retrieve-generate pipeline has no ability to detect or self-correct these kinds of errors. The concept of ["flow engineering"](https://x.com/karpathy/status/1748043513156272416) has been introduced [in the context of code generation](https://arxiv.org/abs/2401.08500): iteratively build an answer to a code question with unit tests to check and self-correct errors. Several works have applied this to RAG, such as Self-RAG and Corrective-RAG. In both cases, checks for document relevance, hallucinations, and / or answer quality are performed in the RAG answer generation flow.

We've found that graphs are a great way to reliably express logical flows and have implemented ideas from several of these papers [using LangGraph](https://github.com/langchain-ai/langgraph/tree/main/examples/rag), as shown in the figure below (red - routing, blue - fallback, green - self-correction):

* **Routing:** Adaptive RAG ([paper](https://arxiv.org/abs/2403.14403)). Route questions to different retrieval approaches, as discussed above.
* **Fallback:** Corrective RAG ([paper](https://arxiv.org/pdf/2401.15884.pdf)). Fall back to web search if docs are not relevant to the query.
* **Self-correction:** Self-RAG ([paper](https://arxiv.org/abs/2310.11511)). Fix answers with hallucinations or answers that don't address the question.

![](/v0.2/assets/images/langgraph_rag-f039b41ef268bf46783706e58726fd9c.png)

Name When to use Description Self-RAG When needing to fix answers with hallucinations or irrelevant content.
Self-RAG performs checks for document relevance, hallucinations, and answer quality during the RAG answer generation flow, iteratively building an answer and self-correcting errors. Corrective-RAG When needing a fallback mechanism for low relevance docs. Corrective-RAG includes a fallback (e.g., to web search) if the retrieved documents are not relevant to the query, ensuring higher quality and more relevant retrieval. tip See several videos and cookbooks showcasing RAG with LangGraph: * [LangGraph Corrective RAG](https://www.youtube.com/watch?v=E2shqsYwxck) * [LangGraph combining Adaptive, Self-RAG, and Corrective RAG](https://www.youtube.com/watch?v=-ROS6gfYIts) * [Cookbooks for RAG using LangGraph](https://github.com/langchain-ai/langgraph/tree/main/examples/rag) See our LangGraph RAG recipes with partners: * [Meta](https://github.com/meta-llama/llama-recipes/tree/main/recipes/use_cases/agents/langchain) * [Mistral](https://github.com/mistralai/cookbook/tree/main/third_party/langchain) ### Text splitting[​](#text-splitting "Direct link to Text splitting") LangChain offers many different types of `text splitters`. These all live in the `langchain-text-splitters` package. Table columns: * **Name**: Name of the text splitter * **Classes**: Classes that implement this text splitter * **Splits On**: How this text splitter splits text * **Adds Metadata**: Whether or not this text splitter adds metadata about where each chunk came from. * **Description**: Description of the splitter, including recommendation on when to use it. Name Classes Splits On Adds Metadata Description Recursive [RecursiveCharacterTextSplitter](/v0.2/docs/how_to/recursive_text_splitter/), [RecursiveJsonSplitter](/v0.2/docs/how_to/recursive_json_splitter/) A list of user defined characters Recursively splits text. This splitting is trying to keep related pieces of text next to each other. This is the `recommended way` to start splitting text. HTML [HTMLHeaderTextSplitter](/v0.2/docs/how_to/HTML_header_metadata_splitter/), [HTMLSectionSplitter](/v0.2/docs/how_to/HTML_section_aware_splitter/) HTML specific characters ✅ Splits text based on HTML-specific characters. Notably, this adds in relevant information about where that chunk came from (based on the HTML) Markdown [MarkdownHeaderTextSplitter](/v0.2/docs/how_to/markdown_header_metadata_splitter/), Markdown specific characters ✅ Splits text based on Markdown-specific characters. Notably, this adds in relevant information about where that chunk came from (based on the Markdown) Code [many languages](/v0.2/docs/how_to/code_splitter/) Code (Python, JS) specific characters Splits text based on characters specific to coding languages. 15 different languages are available to choose from. Token [many classes](/v0.2/docs/how_to/split_by_token/) Tokens Splits text on tokens. There exist a few different ways to measure tokens. Character [CharacterTextSplitter](/v0.2/docs/how_to/character_text_splitter/) A user defined character Splits text based on a user defined character. One of the simpler methods. Semantic Chunker (Experimental) [SemanticChunker](/v0.2/docs/how_to/semantic-chunker/) Sentences First splits on sentences. Then combines ones next to each other if they are semantically similar enough. 
Taken from [Greg Kamradt](https://github.com/FullStackRetrieval-com/RetrievalTutorials/blob/main/tutorials/LevelsOfTextSplitting/5_Levels_Of_Text_Splitting.ipynb) Integration: AI21 Semantic [AI21SemanticTextSplitter](/v0.2/docs/integrations/document_transformers/ai21_semantic_text_splitter/) ✅ Identifies distinct topics that form coherent pieces of text and splits along those.

### Evaluation

Evaluation is the process of assessing the performance and effectiveness of your LLM-powered applications. It involves testing the model's responses against a set of predefined criteria or benchmarks to ensure it meets the desired quality standards and fulfills the intended purpose. This process is vital for building reliable applications.

![](/v0.2/assets/images/langsmith_evaluate-7d48643f3e4c50d77234e13feb95144d.png)

[LangSmith](https://docs.smith.langchain.com/) helps with this process in a few ways:

* It makes it easier to create and curate datasets via its tracing and annotation features
* It provides an evaluation framework that helps you define metrics and run your app against your dataset
* It allows you to track results over time and automatically run your evaluators on a schedule or as part of CI/CD

To learn more, check out [this LangSmith guide](https://docs.smith.langchain.com/concepts/evaluation).
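To make the evaluation workflow a bit more tangible, here is a minimal sketch using the `langsmith` SDK's `evaluate` helper. It assumes a LangSmith dataset named `"qa-dataset"` already exists and that `my_app` stands in for your real application; the dataset name, evaluator, and outputs are all placeholders:

```python
from langsmith.evaluation import evaluate


def my_app(inputs: dict) -> dict:
    # Placeholder for your real chain / agent invocation.
    return {"answer": "The powerhouse of the cell is the mitochondrion."}


def contains_keyword(run, example) -> dict:
    # A trivial custom evaluator: score 1 if the reference keyword appears in the answer.
    answer = (run.outputs or {}).get("answer", "")
    keyword = (example.outputs or {}).get("keyword", "")
    return {"key": "contains_keyword", "score": int(keyword.lower() in answer.lower())}


results = evaluate(
    my_app,                       # target: called with each example's inputs dict
    data="qa-dataset",            # name of an existing LangSmith dataset (placeholder)
    evaluators=[contains_keyword],
    experiment_prefix="concepts-demo",
)
```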
https://python.langchain.com/v0.2/docs/how_to/query_multiple_retrievers/
How to handle multiple retrievers when doing query analysis
===========================================================

Sometimes, a query analysis technique may allow for selection of which retriever to use. To use this, you will need to add some logic to select which retriever to use. We will show a simple example (using mock data) of how to do that.

Setup
---------------------------------------

#### Install dependencies

```python
# %pip install -qU langchain langchain-community langchain-openai langchain-chroma
```

#### Set environment variables

We'll use OpenAI in this example:

```python
import getpass
import os

os.environ["OPENAI_API_KEY"] = getpass.getpass()

# Optional, uncomment to trace runs with LangSmith. Sign up here: https://smith.langchain.com.
# os.environ["LANGCHAIN_TRACING_V2"] = "true"
# os.environ["LANGCHAIN_API_KEY"] = getpass.getpass()
```

### Create Index

We will create a vectorstore over fake information.

```python
from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

texts = ["Harrison worked at Kensho"]
embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
vectorstore = Chroma.from_texts(texts, embeddings, collection_name="harrison")
retriever_harrison = vectorstore.as_retriever(search_kwargs={"k": 1})

texts = ["Ankush worked at Facebook"]
embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
vectorstore = Chroma.from_texts(texts, embeddings, collection_name="ankush")
retriever_ankush = vectorstore.as_retriever(search_kwargs={"k": 1})
```

**API Reference:**[OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) | [RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html)

Query analysis
------------------------------------------------------------------

We will use function calling to structure the output. We will let it return multiple queries.

```python
from typing import List, Optional

from langchain_core.pydantic_v1 import BaseModel, Field


class Search(BaseModel):
    """Search for information about a person."""

    query: str = Field(
        ...,
        description="Query to look up",
    )
    person: str = Field(
        ...,
        description="Person to look things up for. Should be `HARRISON` or `ANKUSH`.",
    )
```

```python
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI

output_parser = PydanticToolsParser(tools=[Search])

system = """You have the ability to issue search queries to get information to help answer user information."""

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system),
        ("human", "{question}"),
    ]
)
llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
structured_llm = llm.with_structured_output(Search)
query_analyzer = {"question": RunnablePassthrough()} | prompt | structured_llm
```

**API Reference:**[PydanticToolsParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.openai_tools.PydanticToolsParser.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)

We can see that this allows for routing between retrievers:

```python
query_analyzer.invoke("where did Harrison Work")
```

```text
Search(query='workplace', person='HARRISON')
```

```python
query_analyzer.invoke("where did ankush Work")
```

```text
Search(query='workplace', person='ANKUSH')
```

Retrieval with query analysis
---------------------------------------------------------------------------------------------------------------

So how would we include this in a chain? We just need some simple logic to select the retriever and pass in the search query.

```python
from langchain_core.runnables import chain
```

**API Reference:**[chain](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.chain.html)

```python
retrievers = {
    "HARRISON": retriever_harrison,
    "ANKUSH": retriever_ankush,
}
```

```python
@chain
def custom_chain(question):
    response = query_analyzer.invoke(question)
    retriever = retrievers[response.person]
    return retriever.invoke(response.query)
```

```python
custom_chain.invoke("where did Harrison Work")
```

```text
[Document(page_content='Harrison worked at Kensho')]
```

```python
custom_chain.invoke("where did ankush Work")
```

```text
[Document(page_content='Ankush worked at Facebook')]
```
https://python.langchain.com/v0.2/docs/how_to/assign/
How to add values to a chain's state
====================================

Prerequisites

This guide assumes familiarity with the following concepts:

* [LangChain Expression Language (LCEL)](/v0.2/docs/concepts/#langchain-expression-language)
* [Chaining runnables](/v0.2/docs/how_to/sequence/)
* [Calling runnables in parallel](/v0.2/docs/how_to/parallel/)
* [Custom functions](/v0.2/docs/how_to/functions/)
* [Passing data through](/v0.2/docs/how_to/passthrough/)

An alternate way of [passing data through](/v0.2/docs/how_to/passthrough/) steps of a chain is to leave the current values of the chain state unchanged while assigning a new value under a given key. The [`RunnablePassthrough.assign()`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html#langchain_core.runnables.passthrough.RunnablePassthrough.assign) static method takes an input value and adds the extra arguments passed to the assign function. This is useful in the common [LangChain Expression Language](/v0.2/docs/concepts/#langchain-expression-language) pattern of additively creating a dictionary to use as input to a later step. Here's an example:

```python
%pip install --upgrade --quiet langchain langchain-openai

import os
from getpass import getpass

os.environ["OPENAI_API_KEY"] = getpass()
```

```python
from langchain_core.runnables import RunnableParallel, RunnablePassthrough

runnable = RunnableParallel(
    extra=RunnablePassthrough.assign(mult=lambda x: x["num"] * 3),
    modified=lambda x: x["num"] + 1,
)

runnable.invoke({"num": 1})
```

**API Reference:**[RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html)

```text
{'extra': {'num': 1, 'mult': 3}, 'modified': 2}
```

Let's break down what's happening here.

* The input to the chain is `{"num": 1}`. This is passed into a `RunnableParallel`, which invokes the runnables it is passed in parallel with that input.
* The value under the `extra` key is invoked. `RunnablePassthrough.assign()` keeps the original keys in the input dict (`{"num": 1}`), and assigns a new key called `mult`. The value is `lambda x: x["num"] * 3`, which evaluates to `3`. Thus, the result is `{"num": 1, "mult": 3}`.
* `{"num": 1, "mult": 3}` is returned to the `RunnableParallel` call, and is set as the value to the key `extra`.
* At the same time, the `modified` key is called. The result is `2`, since the lambda extracts a key called `"num"` from its input and adds one.

Thus, the result is `{'extra': {'num': 1, 'mult': 3}, 'modified': 2}`.
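Because `RunnablePassthrough.assign()` itself returns a runnable, you can also chain several of them to build up state step by step. A minimal sketch (the key names are illustrative only):

```python
from langchain_core.runnables import RunnablePassthrough

# Each .assign() keeps the existing keys and adds one more,
# so state accumulates additively as it flows through the chain.
pipeline = RunnablePassthrough.assign(
    doubled=lambda x: x["num"] * 2
) | RunnablePassthrough.assign(
    total=lambda x: x["num"] + x["doubled"]
)

pipeline.invoke({"num": 5})
# -> {'num': 5, 'doubled': 10, 'total': 15}
```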
Streaming
---------------------------------------------------

One convenient feature of this method is that it allows values to pass through as soon as they are available. To show this off, we'll use `RunnablePassthrough.assign()` to immediately return source docs in a retrieval chain:

```python
from langchain_community.vectorstores import FAISS
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

vectorstore = FAISS.from_texts(
    ["harrison worked at kensho"], embedding=OpenAIEmbeddings()
)
retriever = vectorstore.as_retriever()

template = """Answer the question based only on the following context:
{context}

Question: {question}"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI()

generation_chain = prompt | model | StrOutputParser()

retrieval_chain = {
    "context": retriever,
    "question": RunnablePassthrough(),
} | RunnablePassthrough.assign(output=generation_chain)

stream = retrieval_chain.stream("where did harrison work?")

for chunk in stream:
    print(chunk)
```

**API Reference:**[FAISS](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html) | [StrOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.string.StrOutputParser.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html)

```text
{'question': 'where did harrison work?'}
{'context': [Document(page_content='harrison worked at kensho')]}
{'output': ''}
{'output': 'H'}
{'output': 'arrison'}
{'output': ' worked'}
{'output': ' at'}
{'output': ' Kens'}
{'output': 'ho'}
{'output': '.'}
{'output': ''}
```

We can see that the first chunk contains the original `"question"` since that is immediately available. The second chunk contains `"context"` since the retriever finishes second. Finally, the output from the `generation_chain` streams in chunks as soon as it is available.

Next steps
------------------------------------------------------

Now you've learned how to pass data through your chains to help format the data flowing through them. To learn more, see the other how-to guides on runnables in this section.
https://python.langchain.com/v0.2/docs/langserve/
* [](/v0.2/) * Ecosystem * 🦜️🏓 LangServe On this page 🦜️🏓 LangServe =============== [![Release Notes](https://img.shields.io/github/release/langchain-ai/langserve)](https://github.com/langchain-ai/langserve/releases) [![Downloads](https://static.pepy.tech/badge/langserve/month)](https://pepy.tech/project/langserve) [![Open Issues](https://img.shields.io/github/issues-raw/langchain-ai/langserve)](https://github.com/langchain-ai/langserve/issues) [![](https://dcbadge.vercel.app/api/server/6adMQxSpJS?compact=true&style=flat)](https://discord.com/channels/1038097195422978059/1170024642245832774) 🚩 We will be releasing a hosted version of LangServe for one-click deployments of LangChain applications. [Sign up here](https://forms.gle/KC13Nzn76UeLaghK7) to get on the waitlist. Overview[​](#overview "Direct link to Overview") ------------------------------------------------ [LangServe](https://github.com/langchain-ai/langserve) helps developers deploy `LangChain` [runnables and chains](https://python.langchain.com/docs/expression_language/) as a REST API. This library is integrated with [FastAPI](https://fastapi.tiangolo.com/) and uses [pydantic](https://docs.pydantic.dev/latest/) for data validation. In addition, it provides a client that can be used to call into runnables deployed on a server. A JavaScript client is available in [LangChain.js](https://js.langchain.com/docs/ecosystem/langserve). Features[​](#features "Direct link to Features") ------------------------------------------------ * Input and Output schemas automatically inferred from your LangChain object, and enforced on every API call, with rich error messages * API docs page with JSONSchema and Swagger (insert example link) * Efficient `/invoke`, `/batch` and `/stream` endpoints with support for many concurrent requests on a single server * `/stream_log` endpoint for streaming all (or some) intermediate steps from your chain/agent * **new** as of 0.0.40, supports `/stream_events` to make it easier to stream without needing to parse the output of `/stream_log`. * Playground page at `/playground/` with streaming output and intermediate steps * Built-in (optional) tracing to [LangSmith](https://www.langchain.com/langsmith), just add your API key (see [Instructions](https://docs.smith.langchain.com/)) * All built with battle-tested open-source Python libraries like FastAPI, Pydantic, uvloop and asyncio. * Use the client SDK to call a LangServe server as if it was a Runnable running locally (or call the HTTP API directly) * [LangServe Hub](https://github.com/langchain-ai/langchain/blob/master/templates/README.md) Limitations[​](#limitations "Direct link to Limitations") --------------------------------------------------------- * Client callbacks are not yet supported for events that originate on the server * OpenAPI docs will not be generated when using Pydantic V2. Fast API does not support [mixing pydantic v1 and v2 namespaces](https://github.com/tiangolo/fastapi/issues/10360). See section below for more details. Hosted LangServe[​](#hosted-langserve "Direct link to Hosted LangServe") ------------------------------------------------------------------------ We will be releasing a hosted version of LangServe for one-click deployments of LangChain applications. [Sign up here](https://forms.gle/KC13Nzn76UeLaghK7) to get on the waitlist. 
Security
------------------------------------------------

* Vulnerability in Versions 0.0.13 - 0.0.15 -- playground endpoint allows accessing arbitrary files on server. [Resolved in 0.0.16](https://github.com/langchain-ai/langserve/pull/98).

Installation
------------------------------------------------------------

For both client and server:

```bash
pip install "langserve[all]"
```

or `pip install "langserve[client]"` for client code, and `pip install "langserve[server]"` for server code.

LangChain CLI 🛠️
-------------------------------------------------------------------------

Use the `LangChain` CLI to bootstrap a `LangServe` project quickly.

To use the langchain CLI make sure that you have a recent version of `langchain-cli` installed. You can install it with `pip install -U langchain-cli`.

Setup
---------------------------------------

**Note**: We use `poetry` for dependency management. Please follow the poetry [docs](https://python-poetry.org/docs/) to learn more about it.

### 1. Create a new app using the langchain CLI command

```bash
langchain app new my-app
```

### 2. Define the runnable in `add_routes`. Go to server.py and edit

```python
add_routes(app, NotImplemented)
```

### 3. Use `poetry` to add 3rd party packages (e.g., langchain-openai, langchain-anthropic, langchain-mistral, etc.)

```bash
poetry add [package-name]  # e.g., `poetry add langchain-openai`
```

### 4. Set up relevant env variables. For example:

```bash
export OPENAI_API_KEY="sk-..."
```

### 5. Serve your app

```bash
poetry run langchain serve --port=8100
```

Examples
------------------------------------------------

Get your LangServe instance started quickly with [LangChain Templates](https://github.com/langchain-ai/langchain/blob/master/templates/README.md).

For more examples, see the templates [index](https://github.com/langchain-ai/langchain/blob/master/templates/docs/INDEX.md) or the [examples](https://github.com/langchain-ai/langserve/tree/main/examples) directory.

Description Links **LLMs** Minimal example that serves OpenAI and Anthropic chat models. Uses async, supports batching and streaming. [server](https://github.com/langchain-ai/langserve/tree/main/examples/llm/server.py), [client](https://github.com/langchain-ai/langserve/blob/main/examples/llm/client.ipynb) **Retriever** Simple server that exposes a retriever as a runnable.
[server](https://github.com/langchain-ai/langserve/tree/main/examples/retrieval/server.py), [client](https://github.com/langchain-ai/langserve/tree/main/examples/retrieval/client.ipynb) **Conversational Retriever** A [Conversational Retriever](https://python.langchain.com/docs/expression_language/cookbook/retrieval#conversational-retrieval-chain) exposed via LangServe [server](https://github.com/langchain-ai/langserve/tree/main/examples/conversational_retrieval_chain/server.py), [client](https://github.com/langchain-ai/langserve/tree/main/examples/conversational_retrieval_chain/client.ipynb) **Agent** without **conversation history** based on [OpenAI tools](https://python.langchain.com/docs/modules/agents/agent_types/openai_functions_agent) [server](https://github.com/langchain-ai/langserve/tree/main/examples/agent/server.py), [client](https://github.com/langchain-ai/langserve/tree/main/examples/agent/client.ipynb) **Agent** with **conversation history** based on [OpenAI tools](https://python.langchain.com/docs/modules/agents/agent_types/openai_functions_agent) [server](https://github.com/langchain-ai/langserve/blob/main/examples/agent_with_history/server.py), [client](https://github.com/langchain-ai/langserve/blob/main/examples/agent_with_history/client.ipynb) [RunnableWithMessageHistory](https://python.langchain.com/docs/expression_language/how_to/message_history) to implement chat persisted on backend, keyed off a `session_id` supplied by client. [server](https://github.com/langchain-ai/langserve/tree/main/examples/chat_with_persistence/server.py), [client](https://github.com/langchain-ai/langserve/tree/main/examples/chat_with_persistence/client.ipynb) [RunnableWithMessageHistory](https://python.langchain.com/docs/expression_language/how_to/message_history) to implement chat persisted on backend, keyed off a `conversation_id` supplied by client, and `user_id` (see Auth for implementing `user_id` properly). [server](https://github.com/langchain-ai/langserve/tree/main/examples/chat_with_persistence_and_user/server.py), [client](https://github.com/langchain-ai/langserve/tree/main/examples/chat_with_persistence_and_user/client.ipynb) [Configurable Runnable](https://python.langchain.com/docs/expression_language/how_to/configure) to create a retriever that supports run time configuration of the index name. [server](https://github.com/langchain-ai/langserve/tree/main/examples/configurable_retrieval/server.py), [client](https://github.com/langchain-ai/langserve/tree/main/examples/configurable_retrieval/client.ipynb) [Configurable Runnable](https://python.langchain.com/docs/expression_language/how_to/configure) that shows configurable fields and configurable alternatives. [server](https://github.com/langchain-ai/langserve/tree/main/examples/configurable_chain/server.py), [client](https://github.com/langchain-ai/langserve/tree/main/examples/configurable_chain/client.ipynb) **APIHandler** Shows how to use `APIHandler` instead of `add_routes`. This provides more flexibility for developers to define endpoints. Works well with all FastAPI patterns, but takes a bit more effort. [server](https://github.com/langchain-ai/langserve/tree/main/examples/api_handler_examples/server.py) **LCEL Example** Example that uses LCEL to manipulate a dictionary input. 
[server](https://github.com/langchain-ai/langserve/tree/main/examples/passthrough_dict/server.py), [client](https://github.com/langchain-ai/langserve/tree/main/examples/passthrough_dict/client.ipynb) **Auth** with `add_routes`: Simple authentication that can be applied across all endpoints associated with the app. (Not useful on its own for implementing per user logic.) [server](https://github.com/langchain-ai/langserve/tree/main/examples/auth/global_deps/server.py) **Auth** with `add_routes`: Simple authentication mechanism based on path dependencies. (Not useful on its own for implementing per user logic.) [server](https://github.com/langchain-ai/langserve/tree/main/examples/auth/path_dependencies/server.py) **Auth** with `add_routes`: Implement per user logic and auth for endpoints that use a per request config modifier. (**Note**: At the moment, does not integrate with OpenAPI docs.) [server](https://github.com/langchain-ai/langserve/tree/main/examples/auth/per_req_config_modifier/server.py), [client](https://github.com/langchain-ai/langserve/tree/main/examples/auth/per_req_config_modifier/client.ipynb) **Auth** with `APIHandler`: Implement per user logic and auth that shows how to search only within user owned documents. [server](https://github.com/langchain-ai/langserve/tree/main/examples/auth/api_handler/server.py), [client](https://github.com/langchain-ai/langserve/tree/main/examples/auth/api_handler/client.ipynb) **Widgets** Different widgets that can be used with playground (file upload and chat) [server](https://github.com/langchain-ai/langserve/tree/main/examples/widgets/chat/tuples/server.py) **Widgets** File upload widget used for LangServe playground. [server](https://github.com/langchain-ai/langserve/tree/main/examples/file_processing/server.py), [client](https://github.com/langchain-ai/langserve/tree/main/examples/file_processing/client.ipynb)

Sample Application
------------------------------------------------------------------------------

### Server

Here's a server that deploys an OpenAI chat model, an Anthropic chat model, and a chain that uses the Anthropic model to tell a joke about a topic.

```python
#!/usr/bin/env python
from fastapi import FastAPI
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatAnthropic, ChatOpenAI
from langserve import add_routes

app = FastAPI(
    title="LangChain Server",
    version="1.0",
    description="A simple api server using Langchain's Runnable interfaces",
)

add_routes(
    app,
    ChatOpenAI(model="gpt-3.5-turbo-0125"),
    path="/openai",
)

add_routes(
    app,
    ChatAnthropic(model="claude-3-haiku-20240307"),
    path="/anthropic",
)

model = ChatAnthropic(model="claude-3-haiku-20240307")
prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")
add_routes(
    app,
    prompt | model,
    path="/joke",
)

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="localhost", port=8000)
```

**API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.anthropic.ChatAnthropic.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.openai.ChatOpenAI.html)

If you intend to call your endpoint from the browser, you will also need to set CORS headers.
You can use FastAPI's built-in middleware for that:

```python
from fastapi.middleware.cors import CORSMiddleware

# Set all CORS enabled origins
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
    expose_headers=["*"],
)
```

### Docs

If you've deployed the server above, you can view the generated OpenAPI docs using:

> ⚠️ If using pydantic v2, docs will not be generated for _invoke_, _batch_, _stream_, _stream_log_. See the [Pydantic](#pydantic) section below for more details.

```bash
curl localhost:8000/docs
```

Make sure to **add** the `/docs` suffix.

> ⚠️ Index page `/` is not defined by **design**, so `curl localhost:8000` or visiting the URL will return a 404. If you want content at `/` define an endpoint `@app.get("/")`.

### Client

Python SDK:

```python
from langchain.schema import SystemMessage, HumanMessage
from langchain.prompts import ChatPromptTemplate
from langchain.schema.runnable import RunnableMap
from langserve import RemoteRunnable

openai = RemoteRunnable("http://localhost:8000/openai/")
anthropic = RemoteRunnable("http://localhost:8000/anthropic/")
joke_chain = RemoteRunnable("http://localhost:8000/joke/")

joke_chain.invoke({"topic": "parrots"})

# or async
await joke_chain.ainvoke({"topic": "parrots"})

prompt = [
    SystemMessage(content='Act like either a cat or a parrot.'),
    HumanMessage(content='Hello!'),
]

# Supports astream
async for msg in anthropic.astream(prompt):
    print(msg, end="", flush=True)

prompt = ChatPromptTemplate.from_messages(
    [("system", "Tell me a long story about {topic}")]
)

# Can define custom chains
chain = prompt | RunnableMap({
    "openai": openai,
    "anthropic": anthropic,
})

chain.batch([{"topic": "parrots"}, {"topic": "cats"}])
```

**API Reference:**[SystemMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [RunnableMap](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableMap.html)

In TypeScript (requires LangChain.js version 0.0.166 or later):

```typescript
import { RemoteRunnable } from "@langchain/core/runnables/remote";

const chain = new RemoteRunnable({
  url: `http://localhost:8000/joke/`,
});
const result = await chain.invoke({
  topic: "cats",
});
```

Python using `requests`:

```python
import requests

response = requests.post(
    "http://localhost:8000/joke/invoke",
    json={"input": {"topic": "cats"}},
)
response.json()
```

You can also use `curl`:

```bash
curl --location --request POST 'http://localhost:8000/joke/invoke' \
  --header 'Content-Type: application/json' \
  --data-raw '{
    "input": {
        "topic": "cats"
    }
  }'
```

Endpoints
---------------------------------------------------

The following code:

```python
...
add_routes(
    app,
    runnable,
    path="/my_runnable",
)
```

adds these endpoints to the server:

* `POST /my_runnable/invoke` - invoke the runnable on a single input
* `POST /my_runnable/batch` - invoke the runnable on a batch of inputs
* `POST /my_runnable/stream` - invoke on a single input and stream the output
* `POST /my_runnable/stream_log` - invoke on a single input and stream the output, including output of intermediate steps as it's generated
* `POST /my_runnable/astream_events` - invoke on a single input and stream events as they are generated, including from intermediate steps
* `GET /my_runnable/input_schema` - json schema for input to the runnable
* `GET /my_runnable/output_schema` - json schema for output of the runnable
* `GET /my_runnable/config_schema` - json schema for config of the runnable

These endpoints match the [LangChain Expression Language interface](https://python.langchain.com/docs/expression_language/interface) -- please reference this documentation for more details.

Playground
------------------------------------------------------

You can find a playground page for your runnable at `/my_runnable/playground/`. This exposes a simple UI to [configure](https://python.langchain.com/docs/expression_language/how_to/configure) and invoke your runnable with streaming output and intermediate steps.

![](https://github.com/langchain-ai/langserve/assets/3205522/5ca56e29-f1bb-40f4-84b5-15916384a276)

### Widgets

The playground supports [widgets](#playground-widgets) and can be used to test your runnable with different inputs. See the [widgets](#widgets) section below for more details.

### Sharing

In addition, for configurable runnables, the playground will allow you to configure the runnable and share a link with the configuration:

![](https://github.com/langchain-ai/langserve/assets/3205522/86ce9c59-f8e4-4d08-9fa3-62030e0f521d)

Chat playground
---------------------------------------------------------------------

LangServe also supports a chat-focused playground that you can opt into and use under `/my_runnable/playground/`. Unlike the general playground, only certain types of runnables are supported - the runnable's input schema must be a `dict` with either:

* a single key, and that key's value must be a list of chat messages.
* two keys, one whose value is a list of messages, and the other representing the most recent message.

We recommend you use the first format.

The runnable must also return either an `AIMessage` or a string.

To enable it, you must set `playground_type="chat"` when adding your route. Here's an example:

```python
# Declare a chain
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful, professional assistant named Cob."),
        MessagesPlaceholder(variable_name="messages"),
    ]
)

chain = prompt | ChatAnthropic(model="claude-2")


class InputChat(BaseModel):
    """Input for the chat endpoint."""

    messages: List[Union[HumanMessage, AIMessage, SystemMessage]] = Field(
        ...,
        description="The chat messages representing the current conversation.",
    )


add_routes(
    app,
    chain.with_types(input_type=InputChat),
    enable_feedback_endpoint=True,
    enable_public_trace_link_endpoint=True,
    playground_type="chat",
)
```

If you are using LangSmith, you can also set `enable_feedback_endpoint=True` on your route to enable thumbs-up/thumbs-down buttons after each message, and `enable_public_trace_link_endpoint=True` to add a button that creates a public trace for runs. Note that you will also need to set the following environment variables:

```bash
export LANGCHAIN_TRACING_V2="true"
export LANGCHAIN_PROJECT="YOUR_PROJECT_NAME"
export LANGCHAIN_API_KEY="YOUR_API_KEY"
```

Here's an example with the above two options turned on:

![](./.github/img/chat_playground.png)

Note: If you enable public trace links, the internals of your chain will be exposed. We recommend only using this setting for demos or testing.
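For completeness, here is a minimal sketch of calling such a chat chain from a client, assuming the chain above was mounted with `add_routes(..., path="/chat")` on a server running at `localhost:8000`; the path and message content are hypothetical:

```python
from langchain_core.messages import HumanMessage
from langserve import RemoteRunnable

# Hypothetical client call against a chat route served at /chat.
chat = RemoteRunnable("http://localhost:8000/chat/")

response = chat.invoke({"messages": [HumanMessage(content="Hello, who are you?")]})
print(response.content)  # the chain returns an AIMessage
```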
Legacy Chains
---------------------------------------------------------------

LangServe works with both Runnables (constructed via [LangChain Expression Language](https://python.langchain.com/docs/expression_language/)) and legacy chains (inheriting from `Chain`). However, some of the input schemas for legacy chains may be incomplete/incorrect, leading to errors. This can be fixed by updating the `input_schema` property of those chains in LangChain. If you encounter any errors, please open an issue on THIS repo, and we will work to address it.

Deployment
------------------------------------------------------

### Deploy to AWS

You can deploy to AWS using the [AWS Copilot CLI](https://aws.github.io/copilot-cli/):

```bash
copilot init --app [application-name] --name [service-name] --type 'Load Balanced Web Service' --dockerfile './Dockerfile' --deploy
```

Click [here](https://aws.amazon.com/containers/copilot/) to learn more.

### Deploy to Azure

You can deploy to Azure using Azure Container Apps (Serverless):

```bash
az containerapp up --name [container-app-name] --source . --resource-group [resource-group-name] --environment [environment-name] --ingress external --target-port 8001 --env-vars=OPENAI_API_KEY=your_key
```

You can find more info [here](https://learn.microsoft.com/en-us/azure/container-apps/containerapp-up).

### Deploy to GCP

You can deploy to GCP Cloud Run using the following command:

```bash
gcloud run deploy [your-service-name] --source . --port 8001 --allow-unauthenticated --region us-central1 --set-env-vars=OPENAI_API_KEY=your_key
```

### Community Contributed

#### Deploy to Railway

[Example Railway Repo](https://github.com/PaulLockett/LangServe-Railway/tree/main)

[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/pW9tXP?referralCode=c-aq4K)

Pydantic
------------------------------------------------

LangServe provides support for Pydantic 2 with some limitations.

1. OpenAPI docs will not be generated for invoke/batch/stream/stream_log when using Pydantic V2. FastAPI does not support mixing pydantic v1 and v2 namespaces.
2. LangChain uses the v1 namespace in Pydantic v2. Please read the [following guidelines to ensure compatibility with LangChain](https://github.com/langchain-ai/langchain/discussions/9337).

Except for these limitations, we expect the API endpoints, the playground and any other features to work as expected.

Advanced
------------------------------------------------

### Handling Authentication

If you need to add authentication to your server, please read FastAPI's documentation about [dependencies](https://fastapi.tiangolo.com/tutorial/dependencies/) and [security](https://fastapi.tiangolo.com/tutorial/security/).

The below examples show how to wire up authentication logic to LangServe endpoints using FastAPI primitives. You are responsible for providing the actual authentication logic, the users table, etc. If you're not sure what you're doing, you could try using an existing solution such as [Auth0](https://auth0.com/).
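As a starting point, here is a minimal sketch of the app-level dependency approach using plain FastAPI primitives, assuming a hypothetical header-based token check; the header name, token value, and echo runnable are placeholders, and a real deployment would verify credentials against your own user store:

```python
from fastapi import Depends, FastAPI, Header, HTTPException
from langchain_core.runnables import RunnableLambda
from langserve import add_routes


async def verify_token(x_token: str = Header(...)) -> None:
    """Reject any request that does not carry the expected token (placeholder check)."""
    if x_token != "expected-secret-token":
        raise HTTPException(status_code=401, detail="Invalid or missing X-Token header")


# The dependency runs for every route registered on this app,
# including the ones added by add_routes below.
app = FastAPI(dependencies=[Depends(verify_token)])

add_routes(app, RunnableLambda(lambda x: x), path="/echo")
```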
#### Using add\_routes[​](#using-add_routes "Direct link to Using add_routes") If you're using `add_routes`, see examples [here](https://github.com/langchain-ai/langserve/tree/main/examples/auth). Description Links **Auth** with `add_routes`: Simple authentication that can be applied across all endpoints associated with app. (Not useful on its own for implementing per user logic.) [server](https://github.com/langchain-ai/langserve/tree/main/examples/auth/global_deps/server.py) **Auth** with `add_routes`: Simple authentication mechanism based on path dependencies. (Not useful on its own for implementing per user logic.) [server](https://github.com/langchain-ai/langserve/tree/main/examples/auth/path_dependencies/server.py) **Auth** with `add_routes`: Implement per user logic and auth for endpoints that use per request config modifier. (**Note**: At the moment, does not integrate with OpenAPI docs.) [server](https://github.com/langchain-ai/langserve/tree/main/examples/auth/per_req_config_modifier/server.py), [client](https://github.com/langchain-ai/langserve/tree/main/examples/auth/per_req_config_modifier/client.ipynb) Alternatively, you can use FastAPI's [middleware](https://fastapi.tiangolo.com/tutorial/middleware/). Using global dependencies and path dependencies has the advantage that auth will be properly supported in the OpenAPI docs page, but these are not sufficient for implementing per user logic (e.g., making an application that can search only within user owned documents). If you need to implement per user logic, you can use the `per_req_config_modifier` or `APIHandler` (below) to implement this logic. **Per User** If you need authorization or logic that is user dependent, specify `per_req_config_modifier` when using `add_routes`. Use a callable that receives the raw `Request` object and can extract relevant information from it for authentication and authorization purposes. #### Using APIHandler[​](#using-apihandler "Direct link to Using APIHandler") If you feel comfortable with FastAPI and Python, you can use LangServe's [APIHandler](https://github.com/langchain-ai/langserve/blob/main/examples/api_handler_examples/server.py). Description Links **Auth** with `APIHandler`: Implement per user logic and auth that shows how to search only within user owned documents. [server](https://github.com/langchain-ai/langserve/tree/main/examples/auth/api_handler/server.py), [client](https://github.com/langchain-ai/langserve/tree/main/examples/auth/api_handler/client.ipynb) **APIHandler** Shows how to use `APIHandler` instead of `add_routes`. This provides more flexibility for developers to define endpoints. Works well with all FastAPI patterns, but takes a bit more effort. [server](https://github.com/langchain-ai/langserve/tree/main/examples/api_handler_examples/server.py), [client](https://github.com/langchain-ai/langserve/tree/main/examples/api_handler_examples/client.ipynb) It's a bit more work, but gives you complete control over the endpoint definitions, so you can do whatever custom logic you need for auth. ### Files[​](#files "Direct link to Files") LLM applications often deal with files. There are several architectures that can be used to implement file processing; at a high level: 1. The file may be uploaded to the server via a dedicated endpoint and processed using a separate endpoint 2. The file may be uploaded by either value (bytes of file) or reference (e.g., s3 url to file content) 3. The processing endpoint may be blocking or non-blocking 4. 
If significant processing is required, the processing may be offloaded to a dedicated process pool. You should determine which architecture is appropriate for your application. Currently, to upload files by value to a runnable, use base64 encoding for the file (`multipart/form-data` is not supported yet). Here's an [example](https://github.com/langchain-ai/langserve/tree/main/examples/file_processing) that shows how to use base64 encoding to send a file to a remote runnable. Remember, you can always upload files by reference (e.g., s3 url) or upload them as multipart/form-data to a dedicated endpoint. ### Custom Input and Output Types[​](#custom-input-and-output-types "Direct link to Custom Input and Output Types") Input and Output types are defined on all runnables. You can access them via the `input_schema` and `output_schema` properties. `LangServe` uses these types for validation and documentation. If you want to override the default inferred types, you can use the `with_types` method. Here's a toy example to illustrate the idea: from typing import Anyfrom fastapi import FastAPIfrom langchain.schema.runnable import RunnableLambdaapp = FastAPI()def func(x: Any) -> int: """Mistyped function that should accept an int but accepts anything.""" return x + 1runnable = RunnableLambda(func).with_types( input_type=int,)add_routes(app, runnable) **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) ### Custom User Types[​](#custom-user-types "Direct link to Custom User Types") Inherit from `CustomUserType` if you want the data to de-serialize into a pydantic model rather than the equivalent dict representation. At the moment, this type only works _server_ side and is used to specify desired _decoding_ behavior. If inheriting from this type, the server will keep the decoded type as a pydantic model instead of converting it into a dict. from fastapi import FastAPIfrom langchain.schema.runnable import RunnableLambdafrom langserve import add_routesfrom langserve.schema import CustomUserTypeapp = FastAPI()class Foo(CustomUserType): bar: intdef func(foo: Foo) -> int: """Sample function that expects a Foo type which is a pydantic model""" assert isinstance(foo, Foo) return foo.bar# Note that the input and output type are automatically inferred!# You do not need to specify them.# runnable = RunnableLambda(func).with_types( # <-- Not needed in this case# input_type=Foo,# output_type=int,# )add_routes(app, RunnableLambda(func), path="/foo") **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) ### Playground Widgets[​](#playground-widgets "Direct link to Playground Widgets") The playground allows you to define custom widgets for your runnable from the backend. Here are a few examples: Description Links **Widgets** Different widgets that can be used with playground (file upload and chat) [server](https://github.com/langchain-ai/langserve/tree/main/examples/widgets/chat/tuples/server.py), [client](https://github.com/langchain-ai/langserve/tree/main/examples/widgets/client.ipynb) **Widgets** File upload widget used for LangServe playground. 
[server](https://github.com/langchain-ai/langserve/tree/main/examples/file_processing/server.py), [client](https://github.com/langchain-ai/langserve/tree/main/examples/file_processing/client.ipynb) #### Schema[​](#schema "Direct link to Schema") * A widget is specified at the field level and shipped as part of the JSON schema of the input type * A widget must contain a key called `type` with the value being one of a well known list of widgets * Other widget keys will be associated with values that describe paths in a JSON object type JsonPath = number | string | (number | string)[];type NameSpacedPath = { title: string; path: JsonPath }; // Using title to mimic json schema, but can use namespacetype OneOfPath = { oneOf: JsonPath[] };type Widget = { type: string; // Some well known type (e.g., base64file, chat etc.) [key: string]: JsonPath | NameSpacedPath | OneOfPath;}; ### Available Widgets[​](#available-widgets "Direct link to Available Widgets") There are only two widgets that the user can specify manually right now: 1. File Upload Widget 2. Chat History Widget See below for more information about these widgets. All other widgets on the playground UI are created and managed automatically by the UI based on the config schema of the Runnable. When you create Configurable Runnables, the playground should create appropriate widgets for you to control the behavior. #### File Upload Widget[​](#file-upload-widget "Direct link to File Upload Widget") Allows creation of a file upload input in the UI playground for files that are uploaded as base64 encoded strings. Here's the full [example](https://github.com/langchain-ai/langserve/tree/main/examples/file_processing). Snippet: try: from pydantic.v1 import Fieldexcept ImportError: from pydantic import Fieldfrom langserve import CustomUserType# ATTENTION: Inherit from CustomUserType instead of BaseModel otherwise# the server will decode it into a dict instead of a pydantic model.class FileProcessingRequest(CustomUserType): """Request including a base64 encoded file.""" # The extra field is used to specify a widget for the playground UI. file: str = Field(..., extra={"widget": {"type": "base64file"}}) num_chars: int = 100 Example widget: ![](https://github.com/langchain-ai/langserve/assets/3205522/52199e46-9464-4c2e-8be8-222250e08c3f) ### Chat Widget[​](#chat-widget "Direct link to Chat Widget") Look at the [widget example](https://github.com/langchain-ai/langserve/tree/main/examples/widgets/chat/tuples/server.py). To define a chat widget, make sure that you pass "type": "chat". * "input" is JSONPath to the field in the _Request_ that has the new input message. * "output" is JSONPath to the field in the _Response_ that has the new output message(s). * Don't specify these fields if the entire input or output should be used as they are (e.g., if the output is a list of chat messages.) 
Here's a snippet: class ChatHistory(CustomUserType): chat_history: List[Tuple[str, str]] = Field( ..., examples=[[("human input", "ai response")]], extra={"widget": {"type": "chat", "input": "question", "output": "answer"}}, ) question: strdef _format_to_messages(input: ChatHistory) -> List[BaseMessage]: """Format the input to a list of messages.""" history = input.chat_history user_input = input.question messages = [] for human, ai in history: messages.append(HumanMessage(content=human)) messages.append(AIMessage(content=ai)) messages.append(HumanMessage(content=user_input)) return messagesmodel = ChatOpenAI()chat_model = RunnableParallel({"answer": (RunnableLambda(_format_to_messages) | model)})add_routes( app, chat_model.with_types(input_type=ChatHistory), config_keys=["configurable"], path="/chat",) Example widget: ![](https://github.com/langchain-ai/langserve/assets/3205522/a71ff37b-a6a9-4857-a376-cf27c41d3ca4) You can also specify a list of messages as a parameter directly, as shown in this snippet: prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a helpful assistant named Cob."), MessagesPlaceholder(variable_name="messages"), ])chain = prompt | ChatAnthropic(model="claude-2")class MessageListInput(BaseModel): """Input for the chat endpoint.""" messages: List[Union[HumanMessage, AIMessage]] = Field( ..., description="The chat messages representing the current conversation.", extra={"widget": {"type": "chat", "input": "messages"}}, )add_routes( app, chain.with_types(input_type=MessageListInput), path="/chat",) See [this sample file](https://github.com/langchain-ai/langserve/tree/main/examples/widgets/chat/message_list/server.py) for an example. ### Enabling / Disabling Endpoints (LangServe >=0.0.33)[​](#enabling--disabling-endpoints-langserve-0033 "Direct link to Enabling / Disabling Endpoints (LangServe >=0.0.33)") You can enable / disable which endpoints are exposed when adding routes for a given chain. Use `enabled_endpoints` if you want to make sure you never get a new endpoint when upgrading langserve to a newer version. Enable: The code below will only enable `invoke`, `batch` and the corresponding `config_hash` endpoint variants. add_routes(app, chain, enabled_endpoints=["invoke", "batch", "config_hashes"], path="/mychain") Disable: The code below will disable the playground for the chain: add_routes(app, chain, disabled_endpoints=["playground"], path="/mychain")
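As a quick sanity check of the endpoint toggles above, you can call one of the still-enabled endpoints over plain HTTP. The base URL is an assumption, and the sketch assumes the chain accepts a simple string input; the `invoke` endpoint expects a JSON body with an `input` field and returns the result under `output`.

```python
# Sketch: call the enabled /invoke endpoint of the chain mounted at /mychain.
import requests

resp = requests.post(
    "http://localhost:8000/mychain/invoke",
    json={"input": "hello"},
)
resp.raise_for_status()
print(resp.json()["output"])

# Endpoints that were not enabled (e.g. /mychain/stream) are simply not
# registered, so requests to them return a 404.
```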
https://python.langchain.com/v0.2/docs/how_to/chat_token_usage_tracking/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to track token usage in ChatModels On this page How to track token usage in ChatModels ====================================== Prerequisites This guide assumes familiarity with the following concepts: * [Chat models](/v0.2/docs/concepts/#chat-models) Tracking token usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls. This guide requires `langchain-openai >= 0.1.8`. %pip install --upgrade --quiet langchain langchain-openai Using LangSmith[​](#using-langsmith "Direct link to Using LangSmith") --------------------------------------------------------------------- You can use [LangSmith](https://www.langchain.com/langsmith) to help track token usage in your LLM application. See the [LangSmith quick start guide](https://docs.smith.langchain.com/). Using AIMessage.usage\_metadata[​](#using-aimessageusage_metadata "Direct link to Using AIMessage.usage_metadata") ------------------------------------------------------------------------------------------------------------------ A number of model providers return token usage information as part of the chat generation response. When available, this information will be included on the `AIMessage` objects produced by the corresponding model. LangChain `AIMessage` objects include a [usage\_metadata](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.usage_metadata) attribute. When populated, this attribute will be a [UsageMetadata](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.UsageMetadata.html) dictionary with standard keys (e.g., `"input_tokens"` and `"output_tokens"`). Examples: **OpenAI**: # # !pip install -qU langchain-openaifrom langchain_openai import ChatOpenAIllm = ChatOpenAI(model="gpt-3.5-turbo-0125")openai_response = llm.invoke("hello")openai_response.usage_metadata **API Reference:**[ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) {'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17} **Anthropic**: # !pip install -qU langchain-anthropicfrom langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model="claude-3-haiku-20240307")anthropic_response = llm.invoke("hello")anthropic_response.usage_metadata **API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) {'input_tokens': 8, 'output_tokens': 12, 'total_tokens': 20} ### Using AIMessage.response\_metadata[​](#using-aimessageresponse_metadata "Direct link to Using AIMessage.response_metadata") Metadata from the model response is also included in the AIMessage [response\_metadata](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.response_metadata) attribute. These data are typically not standardized. Note that different providers adopt different conventions for representing token counts: print(f'OpenAI: {openai_response.response_metadata["token_usage"]}\n')print(f'Anthropic: {anthropic_response.response_metadata["usage"]}') OpenAI: {'completion_tokens': 9, 'prompt_tokens': 8, 'total_tokens': 17}Anthropic: {'input_tokens': 8, 'output_tokens': 12} ### Streaming[​](#streaming "Direct link to Streaming") Some providers support token count metadata in a streaming context. 
#### OpenAI[​](#openai "Direct link to OpenAI") For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.8` and can be enabled by setting `stream_options={"include_usage": True}`. note By default, the last message chunk in a stream will include a `"finish_reason"` in the message's `response_metadata` attribute. If we include token usage in streaming mode, an additional chunk containing usage metadata will be added to the end of the stream, such that `"finish_reason"` appears on the second to last message chunk. llm = ChatOpenAI(model="gpt-3.5-turbo-0125")aggregate = Nonefor chunk in llm.stream("hello", stream_options={"include_usage": True}): print(chunk) aggregate = chunk if aggregate is None else aggregate + chunk content='' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'content='Hello' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'content='!' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'content=' How' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'content=' can' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'content=' I' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'content=' assist' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'content=' you' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'content=' today' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'content='?' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'content='' response_metadata={'finish_reason': 'stop'} id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'content='' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf' usage_metadata={'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17} Note that the usage metadata will be included in the sum of the individual message chunks: print(aggregate.content)print(aggregate.usage_metadata) Hello! How can I assist you today?{'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17} To disable streaming token counts for OpenAI, set `"include_usage"` to False in `stream_options`, or omit it from the parameters: aggregate = Nonefor chunk in llm.stream("hello"): print(chunk) content='' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'content='Hello' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'content='!' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'content=' How' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'content=' can' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'content=' I' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'content=' assist' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'content=' you' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'content=' today' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'content='?' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'content='' response_metadata={'finish_reason': 'stop'} id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52' You can also enable streaming token usage by setting `model_kwargs` when instantiating the chat model. This can be useful when incorporating chat models into LangChain [chains](/v0.2/docs/concepts/#langchain-expression-language-lcel): usage metadata can be monitored when [streaming intermediate steps](/v0.2/docs/how_to/streaming/#using-stream-events) or using tracing software such as [LangSmith](https://docs.smith.langchain.com/). See the below example, where we return output structured to a desired schema, but can still observe token usage streamed from intermediate steps. 
from langchain_core.pydantic_v1 import BaseModel, Fieldclass Joke(BaseModel): """Joke to tell user.""" setup: str = Field(description="question to set up a joke") punchline: str = Field(description="answer to resolve the joke")llm = ChatOpenAI( model="gpt-3.5-turbo-0125", model_kwargs={"stream_options": {"include_usage": True}},)# Under the hood, .with_structured_output binds tools to the# chat model and appends a parser.structured_llm = llm.with_structured_output(Joke)async for event in structured_llm.astream_events("Tell me a joke", version="v2"): if event["event"] == "on_chat_model_end": print(f'Token usage: {event["data"]["output"].usage_metadata}\n') elif event["event"] == "on_chain_end": print(event["data"]["output"]) else: pass Token usage: {'input_tokens': 79, 'output_tokens': 23, 'total_tokens': 102}setup='Why was the math book sad?' punchline='Because it had too many problems.' Token usage is also visible in the corresponding [LangSmith trace](https://smith.langchain.com/public/fe6513d5-7212-4045-82e0-fefa28bc7656/r) in the payload from the chat model. Using callbacks[​](#using-callbacks "Direct link to Using callbacks") --------------------------------------------------------------------- There are also some API-specific callback context managers that allow you to track token usage across multiple calls. It is currently only implemented for the OpenAI API and Bedrock Anthropic API. ### OpenAI[​](#openai-1 "Direct link to OpenAI") Let's first look at an extremely simple example of tracking token usage for a single Chat model call. # !pip install -qU langchain-community wikipediafrom langchain_community.callbacks.manager import get_openai_callbackllm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)with get_openai_callback() as cb: result = llm.invoke("Tell me a joke") print(cb) **API Reference:**[get\_openai\_callback](https://api.python.langchain.com/en/latest/callbacks/langchain_community.callbacks.manager.get_openai_callback.html) Tokens Used: 27 Prompt Tokens: 11 Completion Tokens: 16Successful Requests: 1Total Cost (USD): $2.95e-05 Anything inside the context manager will get tracked. Here's an example of using it to track multiple calls in sequence. with get_openai_callback() as cb: result = llm.invoke("Tell me a joke") result2 = llm.invoke("Tell me a joke") print(cb.total_tokens) 55 note Cost information is currently not available in streaming mode. This is because model names are currently not propagated through chunks in streaming mode, and the model name is used to look up the correct pricing. Token counts however are available: with get_openai_callback() as cb: for chunk in llm.stream("Tell me a joke", stream_options={"include_usage": True}): pass print(cb.total_tokens) 28 If a chain or agent with multiple steps in it is used, it will track all those steps. 
from langchain.agents import AgentExecutor, create_tool_calling_agent, load_toolsfrom langchain_core.prompts import ChatPromptTemplateprompt = ChatPromptTemplate.from_messages( [ ("system", "You're a helpful assistant"), ("human", "{input}"), ("placeholder", "{agent_scratchpad}"), ])tools = load_tools(["wikipedia"])agent = create_tool_calling_agent(llm, tools, prompt)agent_executor = AgentExecutor( agent=agent, tools=tools, verbose=True, stream_runnable=False) **API Reference:**[AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html) | [create\_tool\_calling\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.tool_calling_agent.base.create_tool_calling_agent.html) | [load\_tools](https://api.python.langchain.com/en/latest/agent_toolkits/langchain_community.agent_toolkits.load_tools.load_tools.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) note We have to set `stream_runnable=False` for cost information, as described above. By default the AgentExecutor will stream the underlying agent so that you can get the most granular results when streaming events via AgentExecutor.stream\_events. with get_openai_callback() as cb: response = agent_executor.invoke( { "input": "What's a hummingbird's scientific name and what's the fastest bird species?" } ) print(f"Total Tokens: {cb.total_tokens}") print(f"Prompt Tokens: {cb.prompt_tokens}") print(f"Completion Tokens: {cb.completion_tokens}") print(f"Total Cost (USD): ${cb.total_cost}") > Entering new AgentExecutor chain...Invoking: `wikipedia` with `{'query': 'hummingbird scientific name'}`Page: HummingbirdSummary: Hummingbirds are birds native to the Americas and comprise the biological family Trochilidae. With approximately 366 species and 113 genera, they occur from Alaska to Tierra del Fuego, but most species are found in Central and South America. As of 2024, 21 hummingbird species are listed as endangered or critically endangered, with numerous species declining in population.Hummingbirds have varied specialized characteristics to enable rapid, maneuverable flight: exceptional metabolic capacity, adaptations to high altitude, sensitive visual and communication abilities, and long-distance migration in some species. Among all birds, male hummingbirds have the widest diversity of plumage color, particularly in blues, greens, and purples. Hummingbirds are the smallest mature birds, measuring 7.5–13 cm (3–5 in) in length. The smallest is the 5 cm (2.0 in) bee hummingbird, which weighs less than 2.0 g (0.07 oz), and the largest is the 23 cm (9 in) giant hummingbird, weighing 18–24 grams (0.63–0.85 oz). Noted for long beaks, hummingbirds are specialized for feeding on flower nectar, but all species also consume small insects.They are known as hummingbirds because of the humming sound created by their beating wings, which flap at high frequencies audible to other birds and humans. They hover at rapid wing-flapping rates, which vary from around 12 beats per second in the largest species to 80 per second in small hummingbirds.Hummingbirds have the highest mass-specific metabolic rate of any homeothermic animal. To conserve energy when food is scarce and at night when not foraging, they can enter torpor, a state similar to hibernation, and slow their metabolic rate to 1⁄15 of its normal rate. 
While most hummingbirds do not migrate, the rufous hummingbird has one of the longest migrations among birds, traveling twice per year between Alaska and Mexico, a distance of about 3,900 miles (6,300 km).Hummingbirds split from their sister group, the swifts and treeswifts, around 42 million years ago. The oldest known fossil hummingbird is Eurotrochilus, from the Rupelian Stage of Early Oligocene Europe.Page: Rufous hummingbirdSummary: The rufous hummingbird (Selasphorus rufus) is a small hummingbird, about 8 cm (3.1 in) long with a long, straight and slender bill. These birds are known for their extraordinary flight skills, flying 2,000 mi (3,200 km) during their migratory transits. It is one of nine species in the genus Selasphorus.Page: Anna's hummingbirdSummary: Anna's hummingbird (Calypte anna) is a North American species of hummingbird. It was named after Anna Masséna, Duchess of Rivoli.It is native to western coastal regions of North America. In the early 20th century, Anna's hummingbirds bred only in northern Baja California and Southern California. The transplanting of exotic ornamental plants in residential areas throughout the Pacific coast and inland deserts provided expanded nectar and nesting sites, allowing the species to expand its breeding range. Year-round residence of Anna's hummingbirds in the Pacific Northwest is an example of ecological release dependent on acclimation to colder winter temperatures, introduced plants, and human provision of nectar feeders during winter.These birds feed on nectar from flowers using a long extendable tongue. They also consume small insects and other arthropods caught in flight or gleaned from vegetation.Invoking: `wikipedia` with `{'query': 'fastest bird species'}`Page: List of birds by flight speedSummary: This is a list of the fastest flying birds in the world. A bird's velocity is necessarily variable; a hunting bird will reach much greater speeds while diving to catch prey than when flying horizontally. The bird that can achieve the greatest airspeed is the peregrine falcon (Falco peregrinus), able to exceed 320 km/h (200 mph) in its dives. A close relative of the common swift, the white-throated needletail (Hirundapus caudacutus), is commonly reported as the fastest bird in level flight with a reported top speed of 169 km/h (105 mph). This record remains unconfirmed as the measurement methods have never been published or verified. The record for the fastest confirmed level flight by a bird is 111.5 km/h (69.3 mph) held by the common swift.Page: Fastest animalsSummary: This is a list of the fastest animals in the world, by types of animal.Page: FalconSummary: Falcons () are birds of prey in the genus Falco, which includes about 40 species. Falcons are widely distributed on all continents of the world except Antarctica, though closely related raptors did occur there in the Eocene.Adult falcons have thin, tapered wings, which enable them to fly at high speed and change direction rapidly. Fledgling falcons, in their first year of flying, have longer flight feathers, which make their configuration more like that of a general-purpose bird such as a broad wing. This makes flying easier while learning the exceptional skills required to be effective hunters as adults.The falcons are the largest genus in the Falconinae subfamily of Falconidae, which itself also includes another subfamily comprising caracaras and a few other species. 
All these birds kill with their beaks, using a tomial "tooth" on the side of their beaks—unlike the hawks, eagles, and other birds of prey in the Accipitridae, which use their feet.The largest falcon is the gyrfalcon at up to 65 cm in length. The smallest falcon species is the pygmy falcon, which measures just 20 cm. As with hawks and owls, falcons exhibit sexual dimorphism, with the females typically larger than the males, thus allowing a wider range of prey species.Some small falcons with long, narrow wings are called "hobbies" and some which hover while hunting are called "kestrels".As is the case with many birds of prey, falcons have exceptional powers of vision; the visual acuity of one species has been measured at 2.6 times that of a normal human. Peregrine falcons have been recorded diving at speeds of 320 km/h (200 mph), making them the fastest-moving creatures on Earth; the fastest recorded dive attained a vertical speed of 390 km/h (240 mph).The scientific name for a hummingbird is Trochilidae. The fastest bird species is the peregrine falcon (Falco peregrinus), which can exceed speeds of 320 km/h (200 mph) in its dives.> Finished chain.Total Tokens: 1787Prompt Tokens: 1687Completion Tokens: 100Total Cost (USD): $0.0009935 ### Bedrock Anthropic[​](#bedrock-anthropic "Direct link to Bedrock Anthropic") The `get_bedrock_anthropic_callback` works very similarly: # !pip install langchain-awsfrom langchain_aws import ChatBedrockfrom langchain_community.callbacks.manager import get_bedrock_anthropic_callbackllm = ChatBedrock(model_id="anthropic.claude-v2")with get_bedrock_anthropic_callback() as cb: result = llm.invoke("Tell me a joke") result2 = llm.invoke("Tell me a joke") print(cb) **API Reference:**[get\_bedrock\_anthropic\_callback](https://api.python.langchain.com/en/latest/callbacks/langchain_community.callbacks.manager.get_bedrock_anthropic_callback.html) Tokens Used: 96 Prompt Tokens: 26 Completion Tokens: 70Successful Requests: 2Total Cost (USD): $0.001888 Next steps[​](#next-steps "Direct link to Next steps") ------------------------------------------------------ You've now seen a few examples of how to track token usage for supported providers. Next, check out the other how-to guides chat models in this section, like [how to get a model to return structured output](/v0.2/docs/how_to/structured_output/) or [how to add caching to your chat models](/v0.2/docs/how_to/chat_model_caching/). [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/chat_token_usage_tracking.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to init any model in one line ](/v0.2/docs/how_to/chat_models_universal_init/)[ Next How to add tools to chatbots ](/v0.2/docs/how_to/chatbots_tools/) * [Using LangSmith](#using-langsmith) * [Using AIMessage.usage\_metadata](#using-aimessageusage_metadata) * [Using AIMessage.response\_metadata](#using-aimessageresponse_metadata) * [Streaming](#streaming) * [Using callbacks](#using-callbacks) * [OpenAI](#openai-1) * [Bedrock Anthropic](#bedrock-anthropic) * [Next steps](#next-steps)
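One addendum to the approaches above: for providers that populate `usage_metadata` but do not have a dedicated callback context manager, you can aggregate the counts yourself. A minimal sketch, assuming any chat model that returns `usage_metadata` as shown earlier (a `ChatOpenAI` instance is used here purely as an example):

```python
# Sum token usage across several calls using the standardized
# AIMessage.usage_metadata keys (input_tokens / output_tokens / total_tokens).
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
totals = {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0}

for prompt in ["Tell me a joke", "Tell me another joke"]:
    message = llm.invoke(prompt)
    usage = message.usage_metadata or {}
    for key in totals:
        totals[key] += usage.get(key, 0)

print(totals)
```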
https://python.langchain.com/v0.2/docs/how_to/query_constructing_filters/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to construct filters for query analysis How to construct filters for query analysis =========================================== We may want to do query analysis to extract filters to pass into retrievers. One way we ask the LLM to represent these filters is as a Pydantic model. There is then the issue of converting that Pydantic model into a filter that can be passed into a retriever. This can be done manually, but LangChain also provides some "Translators" that are able to translate from a common syntax into filters specific to each retriever. Here, we will cover how to use those translators. from typing import Optionalfrom langchain.chains.query_constructor.ir import ( Comparator, Comparison, Operation, Operator, StructuredQuery,)from langchain.retrievers.self_query.chroma import ChromaTranslatorfrom langchain.retrievers.self_query.elasticsearch import ElasticsearchTranslatorfrom langchain_core.pydantic_v1 import BaseModel **API Reference:**[Comparator](https://api.python.langchain.com/en/latest/structured_query/langchain_core.structured_query.Comparator.html) | [Comparison](https://api.python.langchain.com/en/latest/structured_query/langchain_core.structured_query.Comparison.html) | [Operation](https://api.python.langchain.com/en/latest/structured_query/langchain_core.structured_query.Operation.html) | [Operator](https://api.python.langchain.com/en/latest/structured_query/langchain_core.structured_query.Operator.html) | [StructuredQuery](https://api.python.langchain.com/en/latest/structured_query/langchain_core.structured_query.StructuredQuery.html) | [ChromaTranslator](https://api.python.langchain.com/en/latest/query_constructors/langchain_community.query_constructors.chroma.ChromaTranslator.html) | [ElasticsearchTranslator](https://api.python.langchain.com/en/latest/query_constructors/langchain_community.query_constructors.elasticsearch.ElasticsearchTranslator.html) In this example, `year` and `author` are both attributes to filter on. class Search(BaseModel): query: str start_year: Optional[int] author: Optional[str] search_query = Search(query="RAG", start_year=2022, author="LangChain") def construct_comparisons(query: Search): comparisons = [] if query.start_year is not None: comparisons.append( Comparison( comparator=Comparator.GT, attribute="start_year", value=query.start_year, ) ) if query.author is not None: comparisons.append( Comparison( comparator=Comparator.EQ, attribute="author", value=query.author, ) ) return comparisons comparisons = construct_comparisons(search_query) _filter = Operation(operator=Operator.AND, arguments=comparisons) ElasticsearchTranslator().visit_operation(_filter) {'bool': {'must': [{'range': {'metadata.start_year': {'gt': 2022}}}, {'term': {'metadata.author.keyword': 'LangChain'}}]}} ChromaTranslator().visit_operation(_filter) {'$and': [{'start_year': {'$gt': 2022}}, {'author': {'$eq': 'LangChain'}}]} [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/query_constructing_filters.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to add values to a chain's state ](/v0.2/docs/how_to/assign/)[ Next How to configure runtime chain internals ](/v0.2/docs/how_to/configure/)
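As a hypothetical follow-on (not part of the original notebook): once translated, the filter can be passed straight into the corresponding vector store. The `vectorstore` below is an assumed, pre-existing Chroma instance whose documents carry `start_year` and `author` metadata.

```python
# Sketch: use the Chroma-translated filter in an actual similarity search.
chroma_filter = ChromaTranslator().visit_operation(_filter)

docs = vectorstore.similarity_search(
    search_query.query,  # "RAG"
    k=4,
    filter=chroma_filter,  # {'$and': [{'start_year': {'$gt': 2022}}, {'author': {'$eq': 'LangChain'}}]}
)
```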
https://python.langchain.com/v0.2/docs/versions/release_policy/
* [](/v0.2/) * Versions * Release Policy On this page LangChain releases ================== The LangChain ecosystem is composed of different component packages (e.g., `langchain-core`, `langchain`, `langchain-community`, `langgraph`, `langserve`, partner packages etc.) Versioning[​](#versioning "Direct link to Versioning") ------------------------------------------------------ ### `langchain` and `langchain-core`[​](#langchain-and-langchain-core "Direct link to langchain-and-langchain-core") `langchain` and `langchain-core` follow [semantic versioning](https://semver.org/) in the format of 0.**Y**.**Z**. The packages are under rapid development, and so are currently versioning the packages with a major version of 0. Minor version increases will occur for: * Breaking changes for any public interfaces marked as `beta`. Patch version increases will occur for: * Bug fixes * New features * Any changes to private interfaces * Any changes to `beta` features When upgrading between minor versions, users should review the list of breaking changes and deprecations. From time to time, we will version packages as **release candidates**. These are versions that are intended to be released as stable versions, but we want to get feedback from the community before doing so. Release candidates will be versioned as 0.**Y**.**Z**rc**N**. For example, 0.2.0rc1. If no issues are found, the release candidate will be released as a stable version with the same version number. If issues are found, we will release a new release candidate with an incremented `N` value (e.g., 0.2.0rc2). ### Other packages in the langchain ecosystem[​](#other-packages-in-the-langchain-ecosystem "Direct link to Other packages in the langchain ecosystem") Other packages in the ecosystem (including user packages) can follow a different versioning scheme, but are generally expected to pin to specific minor versions of `langchain` and `langchain-core`. Release cadence[​](#release-cadence "Direct link to Release cadence") --------------------------------------------------------------------- We expect to space out **minor** releases (e.g., from 0.2.0 to 0.3.0) of `langchain` and `langchain-core` by at least 2-3 months, as such releases may contain breaking changes. Patch versions are released frequently as they contain bug fixes and new features. API stability[​](#api-stability "Direct link to API stability") --------------------------------------------------------------- The development of LLM applications is a rapidly evolving field, and we are constantly learning from our users and the community. As such, we expect that the APIs in `langchain` and `langchain-core` will continue to evolve to better serve the needs of our users. Even though both `langchain` and `langchain-core` are currently in a pre-1.0 state, we are committed to maintaining API stability in these packages. * Breaking changes to the public API will result in a minor version bump (the second digit) * Any bug fixes or new features will result in a patch version bump (the third digit) We will generally try to avoid making unnecessary changes, and will provide a deprecation policy for features that are being removed. ### Stability of other packages[​](#stability-of-other-packages "Direct link to Stability of other packages") The stability of other packages in the LangChain ecosystem may vary: * `langchain-community` is a community maintained package that contains 3rd party integrations. 
While we do our best to review and test changes in `langchain-community`, `langchain-community` is expected to experience more breaking changes than `langchain` and `langchain-core` as it contains many community contributions. * Partner packages may follow different stability and versioning policies, and users should refer to the documentation of those packages for more information; however, in general these packages are expected to be stable. ### What is a "API stability"?[​](#what-is-a-api-stability "Direct link to What is a \"API stability\"?") API stability means: * All the public APIs (everything in this documentation) will not be moved or renamed without providing backwards-compatible aliases. * If new features are added to these APIs – which is quite possible – they will not break or change the meaning of existing methods. In other words, "stable" does not (necessarily) mean "complete." * If, for some reason, an API declared stable must be removed or replaced, it will be declared deprecated but will remain in the API for at least two minor releases. Warnings will be issued when the deprecated method is called. ### **APIs marked as internal**[​](#apis-marked-as-internal "Direct link to apis-marked-as-internal") Certain APIs are explicitly marked as “internal” in a couple of ways: * Some documentation refers to internals and mentions them as such. If the documentation says that something is internal, it may change. * Functions, methods, and other objects prefixed by a leading underscore (**`_`**). This is the standard Python convention of indicating that something is private; if any method starts with a single **`_`**, it’s an internal API. * **Exception:** Certain methods are prefixed with `_` , but do not contain an implementation. These methods are _meant_ to be overridden by sub-classes that provide the implementation. Such methods are generally part of the **Public API** of LangChain. Deprecation policy[​](#deprecation-policy "Direct link to Deprecation policy") ------------------------------------------------------------------------------ We will generally avoid deprecating features until a better alternative is available. When a feature is deprecated, it will continue to work in the current and next minor version of `langchain` and `langchain-core`. After that, the feature will be removed. Since we're expecting to space out minor releases by at least 2-3 months, this means that a feature can be removed within 2-6 months of being deprecated. In some situations, we may allow the feature to remain in the code base for longer periods of time, if it's not causing issues in the packages, to reduce the burden on users. [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/versions/release_policy.mdx) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). 
https://python.langchain.com/v0.2/docs/how_to/configure/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to configure runtime chain internals On this page How to configure runtime chain internals ======================================== Prerequisites This guide assumes familiarity with the following concepts: * [LangChain Expression Language (LCEL)](/v0.2/docs/concepts/#langchain-expression-language) * [Chaining runnables](/v0.2/docs/how_to/sequence/) * [Binding runtime arguments](/v0.2/docs/how_to/binding/) Sometimes you may want to experiment with, or even expose to the end user, multiple different ways of doing things within your chains. This can include tweaking parameters such as temperature or even swapping out one model for another. In order to make this experience as easy as possible, we have defined two methods. * A `configurable_fields` method. This lets you configure particular fields of a runnable. * This is related to the [`.bind`](/v0.2/docs/how_to/binding/) method on runnables, but allows you to specify parameters for a given step in a chain at runtime rather than specifying them beforehand. * A `configurable_alternatives` method. With this method, you can list out alternatives for any particular runnable that can be set during runtime, and swap them for those specified alternatives. Configurable Fields[​](#configurable-fields "Direct link to Configurable Fields") --------------------------------------------------------------------------------- Let's walk through an example that configures chat model fields like temperature at runtime: %pip install --upgrade --quiet langchain langchain-openaiimport osfrom getpass import getpassos.environ["OPENAI_API_KEY"] = getpass() WARNING: You are using pip version 22.0.4; however, version 24.0 is available.You should consider upgrading via the '/Users/jacoblee/.pyenv/versions/3.10.5/bin/python -m pip install --upgrade pip' command.Note: you may need to restart the kernel to use updated packages. from langchain_core.prompts import PromptTemplatefrom langchain_core.runnables import ConfigurableFieldfrom langchain_openai import ChatOpenAImodel = ChatOpenAI(temperature=0).configurable_fields( temperature=ConfigurableField( id="llm_temperature", name="LLM Temperature", description="The temperature of the LLM", ))model.invoke("pick a random number") **API Reference:**[PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) | [ConfigurableField](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.utils.ConfigurableField.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) AIMessage(content='17', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 11, 'total_tokens': 12}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-ba26a0da-0a69-4533-ab7f-21178a73d303-0') Above, we defined `temperature` as a [`ConfigurableField`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.utils.ConfigurableField.html#langchain_core.runnables.utils.ConfigurableField) that we can set at runtime. 
To do so, we use the [`with_config`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config) method like this: model.with_config(configurable={"llm_temperature": 0.9}).invoke("pick a random number") AIMessage(content='12', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 11, 'total_tokens': 12}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-ba8422ad-be77-4cb1-ac45-ad0aae74e3d9-0') Note that the passed `llm_temperature` entry in the dict has the same key as the `id` of the `ConfigurableField`. We can also do this to affect just one step that's part of a chain: prompt = PromptTemplate.from_template("Pick a random number above {x}")chain = prompt | modelchain.invoke({"x": 0}) AIMessage(content='27', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 14, 'total_tokens': 15}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-ecd4cadd-1b72-4f92-b9a0-15e08091f537-0') chain.with_config(configurable={"llm_temperature": 0.9}).invoke({"x": 0}) AIMessage(content='35', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 14, 'total_tokens': 15}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-a916602b-3460-46d3-a4a8-7c926ec747c0-0') ### With HubRunnables[​](#with-hubrunnables "Direct link to With HubRunnables") This is useful to allow for switching of prompts from langchain.runnables.hub import HubRunnableprompt = HubRunnable("rlm/rag-prompt").configurable_fields( owner_repo_commit=ConfigurableField( id="hub_commit", name="Hub Commit", description="The Hub commit to pull from", ))prompt.invoke({"question": "foo", "context": "bar"}) **API Reference:**[HubRunnable](https://api.python.langchain.com/en/latest/runnables/langchain.runnables.hub.HubRunnable.html) ChatPromptValue(messages=[HumanMessage(content="You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.\nQuestion: foo \nContext: bar \nAnswer:")]) prompt.with_config(configurable={"hub_commit": "rlm/rag-prompt-llama"}).invoke( {"question": "foo", "context": "bar"}) ChatPromptValue(messages=[HumanMessage(content="[INST]<<SYS>> You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.<</SYS>> \nQuestion: foo \nContext: bar \nAnswer: [/INST]")]) Configurable Alternatives[​](#configurable-alternatives "Direct link to Configurable Alternatives") --------------------------------------------------------------------------------------------------- The `configurable_alternatives()` method allows us to swap out steps in a chain with an alternative. 
Below, we swap out one chat model for another: %pip install --upgrade --quiet langchain-anthropicimport osfrom getpass import getpassos.environ["ANTHROPIC_API_KEY"] = getpass() WARNING: You are using pip version 22.0.4; however, version 24.0 is available.You should consider upgrading via the '/Users/jacoblee/.pyenv/versions/3.10.5/bin/python -m pip install --upgrade pip' command.Note: you may need to restart the kernel to use updated packages. from langchain_anthropic import ChatAnthropicfrom langchain_core.prompts import PromptTemplatefrom langchain_core.runnables import ConfigurableFieldfrom langchain_openai import ChatOpenAIllm = ChatAnthropic( model="claude-3-haiku-20240307", temperature=0).configurable_alternatives( # This gives this field an id # When configuring the end runnable, we can then use this id to configure this field ConfigurableField(id="llm"), # This sets a default_key. # If we specify this key, the default LLM (ChatAnthropic initialized above) will be used default_key="anthropic", # This adds a new option, with name `openai` that is equal to `ChatOpenAI()` openai=ChatOpenAI(), # This adds a new option, with name `gpt4` that is equal to `ChatOpenAI(model="gpt-4")` gpt4=ChatOpenAI(model="gpt-4"), # You can add more configuration options here)prompt = PromptTemplate.from_template("Tell me a joke about {topic}")chain = prompt | llm# By default it will call Anthropicchain.invoke({"topic": "bears"}) **API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) | [ConfigurableField](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.utils.ConfigurableField.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) AIMessage(content="Here's a bear joke for you:\n\nWhy don't bears wear socks? \nBecause they have bear feet!\n\nHow's that? I tried to come up with a simple, silly pun-based joke about bears. Puns and wordplay are a common way to create humorous bear jokes. Let me know if you'd like to hear another one!", response_metadata={'id': 'msg_018edUHh5fUbWdiimhrC3dZD', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 13, 'output_tokens': 80}}, id='run-775bc58c-28d7-4e6b-a268-48fa6661f02f-0') # We can use `.with_config(configurable={"llm": "openai"})` to specify an llm to usechain.with_config(configurable={"llm": "openai"}).invoke({"topic": "bears"}) AIMessage(content="Why don't bears like fast food?\n\nBecause they can't catch it!", response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 13, 'total_tokens': 28}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-7bdaa992-19c9-4f0d-9a0c-1f326bc992d4-0') # If we use the `default_key` then it uses the defaultchain.with_config(configurable={"llm": "anthropic"}).invoke({"topic": "bears"}) AIMessage(content="Here's a bear joke for you:\n\nWhy don't bears wear socks? \nBecause they have bear feet!\n\nHow's that? I tried to come up with a simple, silly pun-based joke about bears. Puns and wordplay are a common way to create humorous bear jokes. 
Let me know if you'd like to hear another one!", response_metadata={'id': 'msg_01BZvbmnEPGBtcxRWETCHkct', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 13, 'output_tokens': 80}}, id='run-59b6ee44-a1cd-41b8-a026-28ee67cdd718-0') ### With Prompts[​](#with-prompts "Direct link to With Prompts") We can do a similar thing, but alternate between prompts llm = ChatAnthropic(model="claude-3-haiku-20240307", temperature=0)prompt = PromptTemplate.from_template( "Tell me a joke about {topic}").configurable_alternatives( # This gives this field an id # When configuring the end runnable, we can then use this id to configure this field ConfigurableField(id="prompt"), # This sets a default_key. # If we specify this key, the default LLM (ChatAnthropic initialized above) will be used default_key="joke", # This adds a new option, with name `poem` poem=PromptTemplate.from_template("Write a short poem about {topic}"), # You can add more configuration options here)chain = prompt | llm# By default it will write a jokechain.invoke({"topic": "bears"}) AIMessage(content="Here's a bear joke for you:\n\nWhy don't bears wear socks? \nBecause they have bear feet!", response_metadata={'id': 'msg_01DtM1cssjNFZYgeS3gMZ49H', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 13, 'output_tokens': 28}}, id='run-8199af7d-ea31-443d-b064-483693f2e0a1-0') # We can configure it write a poemchain.with_config(configurable={"prompt": "poem"}).invoke({"topic": "bears"}) AIMessage(content="Here is a short poem about bears:\n\nMajestic bears, strong and true,\nRoaming the forests, wild and free.\nPowerful paws, fur soft and brown,\nCommanding respect, nature's crown.\n\nForaging for berries, fishing streams,\nProtecting their young, fierce and keen.\nMighty bears, a sight to behold,\nGuardians of the wilderness, untold.\n\nIn the wild they reign supreme,\nEmbodying nature's grand theme.\nBears, a symbol of strength and grace,\nCaptivating all who see their face.", response_metadata={'id': 'msg_01Wck3qPxrjURtutvtodaJFn', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 13, 'output_tokens': 134}}, id='run-69414a1e-51d7-4bec-a307-b34b7d61025e-0') ### With Prompts and LLMs[​](#with-prompts-and-llms "Direct link to With Prompts and LLMs") We can also have multiple things configurable! Here's an example doing that with both prompts and LLMs. llm = ChatAnthropic( model="claude-3-haiku-20240307", temperature=0).configurable_alternatives( # This gives this field an id # When configuring the end runnable, we can then use this id to configure this field ConfigurableField(id="llm"), # This sets a default_key. # If we specify this key, the default LLM (ChatAnthropic initialized above) will be used default_key="anthropic", # This adds a new option, with name `openai` that is equal to `ChatOpenAI()` openai=ChatOpenAI(), # This adds a new option, with name `gpt4` that is equal to `ChatOpenAI(model="gpt-4")` gpt4=ChatOpenAI(model="gpt-4"), # You can add more configuration options here)prompt = PromptTemplate.from_template( "Tell me a joke about {topic}").configurable_alternatives( # This gives this field an id # When configuring the end runnable, we can then use this id to configure this field ConfigurableField(id="prompt"), # This sets a default_key. 
# If we specify this key, the default LLM (ChatAnthropic initialized above) will be used default_key="joke", # This adds a new option, with name `poem` poem=PromptTemplate.from_template("Write a short poem about {topic}"), # You can add more configuration options here)chain = prompt | llm# We can configure it write a poem with OpenAIchain.with_config(configurable={"prompt": "poem", "llm": "openai"}).invoke( {"topic": "bears"}) AIMessage(content="In the forest deep and wide,\nBears roam with grace and pride.\nWith fur as dark as night,\nThey rule the land with all their might.\n\nIn winter's chill, they hibernate,\nIn spring they emerge, hungry and great.\nWith claws sharp and eyes so keen,\nThey hunt for food, fierce and lean.\n\nBut beneath their tough exterior,\nLies a gentle heart, warm and superior.\nThey love their cubs with all their might,\nProtecting them through day and night.\n\nSo let us admire these majestic creatures,\nIn awe of their strength and features.\nFor in the wild, they reign supreme,\nThe mighty bears, a timeless dream.", response_metadata={'token_usage': {'completion_tokens': 133, 'prompt_tokens': 13, 'total_tokens': 146}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-5eec0b96-d580-49fd-ac4e-e32a0803b49b-0') # We can always just configure only one if we wantchain.with_config(configurable={"llm": "openai"}).invoke({"topic": "bears"}) AIMessage(content="Why don't bears wear shoes?\n\nBecause they have bear feet!", response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 13, 'total_tokens': 26}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-c1b14c9c-4988-49b8-9363-15bfd479973a-0') ### Saving configurations[​](#saving-configurations "Direct link to Saving configurations") We can also easily save configured chains as their own objects openai_joke = chain.with_config(configurable={"llm": "openai"})openai_joke.invoke({"topic": "bears"}) AIMessage(content="Why did the bear break up with his girlfriend? \nBecause he couldn't bear the relationship anymore!", response_metadata={'token_usage': {'completion_tokens': 20, 'prompt_tokens': 13, 'total_tokens': 33}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-391ebd55-9137-458b-9a11-97acaff6a892-0') Next steps[​](#next-steps "Direct link to Next steps") ------------------------------------------------------ You now know how to configure a chain's internal steps at runtime. To learn more, see the other how-to guides on runnables in this section, including: * Using [.bind()](/v0.2/docs/how_to/binding/) as a simpler way to set a runnable's runtime parameters [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/configure.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). 
https://python.langchain.com/v0.2/docs/how_to/chatbots_tools/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to add tools to chatbots On this page How to add tools to chatbots ============================ Prerequisites This guide assumes familiarity with the following concepts: * [Chatbots](/v0.2/docs/concepts/#messages) * [Agents](/v0.2/docs/tutorials/agents/) * [Chat history](/v0.2/docs/concepts/#chat-history) This section will cover how to create conversational agents: chatbots that can interact with other systems and APIs using tools. Setup[​](#setup "Direct link to Setup") --------------------------------------- For this guide, we'll be using a [tool calling agent](/v0.2/docs/how_to/agent_executor/) with a single tool for searching the web. The default will be powered by [Tavily](/v0.2/docs/integrations/tools/tavily_search/), but you can switch it out for any similar tool. The rest of this section will assume you're using Tavily. You'll need to [sign up for an account](https://tavily.com/) on the Tavily website, and install the following packages: %pip install --upgrade --quiet langchain-community langchain-openai tavily-python# Set env var OPENAI_API_KEY or load from a .env file:import dotenvdotenv.load_dotenv() You will also need your OpenAI key set as `OPENAI_API_KEY` and your Tavily API key set as `TAVILY_API_KEY`. Creating an agent[​](#creating-an-agent "Direct link to Creating an agent") --------------------------------------------------------------------------- Our end goal is to create an agent that can respond conversationally to user questions while looking up information as needed. First, let's initialize Tavily and an OpenAI chat model capable of tool calling: from langchain_community.tools.tavily_search import TavilySearchResultsfrom langchain_openai import ChatOpenAItools = [TavilySearchResults(max_results=1)]# Choose the LLM that will drive the agent# Only certain models support thischat = ChatOpenAI(model="gpt-3.5-turbo-1106", temperature=0) **API Reference:**[TavilySearchResults](https://api.python.langchain.com/en/latest/tools/langchain_community.tools.tavily_search.tool.TavilySearchResults.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) To make our agent conversational, we must also choose a prompt with a placeholder for our chat history. Here's an example: from langchain_core.prompts import ChatPromptTemplate# Adapted from https://smith.langchain.com/hub/jacob/tool-calling-agentprompt = ChatPromptTemplate.from_messages( [ ( "system", "You are a helpful assistant. You may not need to use tools for every query - the user may just want to chat!", ), ("placeholder", "{messages}"), ("placeholder", "{agent_scratchpad}"), ]) **API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) Great! 
Now let's assemble our agent: from langchain.agents import AgentExecutor, create_tool_calling_agentagent = create_tool_calling_agent(chat, tools, prompt)agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True) **API Reference:**[AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html) | [create\_tool\_calling\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.tool_calling_agent.base.create_tool_calling_agent.html) Running the agent[​](#running-the-agent "Direct link to Running the agent") --------------------------------------------------------------------------- Now that we've set up our agent, let's try interacting with it! It can handle both trivial queries that require no lookup: from langchain_core.messages import HumanMessageagent_executor.invoke({"messages": [HumanMessage(content="I'm Nemo!")]}) **API Reference:**[HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) > Entering new AgentExecutor chain...Hello Nemo! It's great to meet you. How can I assist you today?> Finished chain. {'messages': [HumanMessage(content="I'm Nemo!")], 'output': "Hello Nemo! It's great to meet you. How can I assist you today?"} Or, it can use of the passed search tool to get up to date information if needed: agent_executor.invoke( { "messages": [ HumanMessage( content="What is the current conservation status of the Great Barrier Reef?" ) ], }) > Entering new AgentExecutor chain...Invoking: `tavily_search_results_json` with `{'query': 'current conservation status of the Great Barrier Reef'}`[{'url': 'https://www.abc.net.au/news/2022-08-04/great-barrier-reef-report-says-coral-recovering-after-bleaching/101296186', 'content': 'Great Barrier Reef hit with widespread and severe bleaching event\n\'Devastating\': Over 90pc of reefs on Great Barrier Reef suffered bleaching over summer, report reveals\nTop Stories\nJailed Russian opposition leader Alexei Navalny is dead, says prison service\nTaylor Swift puts an Aussie twist on a classic as she packs the MCG for the biggest show of her career — as it happened\nMelbourne comes alive with Swifties, as even those without tickets turn up to soak in the atmosphere\nAustralian Border Force investigates after arrival of more than 20 men by boat north of Broome\nOpenAI launches video model that can instantly create short clips from text prompts\nAntoinette Lattouf loses bid to force ABC to produce emails calling for her dismissal\nCategory one cyclone makes landfall in Gulf of Carpentaria off NT-Queensland border\nWhy the RBA may be forced to cut before the Fed\nBrisbane records \'wettest day since 2022\', as woman dies in floodwaters near Mount Isa\n$45m Sydney beachside home once owned by late radio star is demolished less than a year after sale\nAnnabel Sutherland\'s historic double century puts Australia within reach of Test victory over South Africa\nAlmighty defensive effort delivers Indigenous victory in NRL All Stars clash\nLisa Wilkinson feared she would have to sell home to pay legal costs of Bruce Lehrmann\'s defamation case, court documents reveal\nSupermarkets as you know them are disappearing from our cities\nNRL issues Broncos\' Reynolds, Carrigan with breach notices after public scrap\nPopular Now\nJailed Russian opposition leader Alexei Navalny is dead, says prison service\nTaylor Swift puts an Aussie twist on a classic as she packs the MCG for the biggest show of her career — as it happened\n$45m Sydney 
beachside home once owned by late radio star is demolished less than a year after sale\nAustralian Border Force investigates after arrival of more than 20 men by boat north of Broome\nDealer sentenced for injecting children as young as 12 with methylamphetamine\nMelbourne comes alive with Swifties, as even those without tickets turn up to soak in the atmosphere\nTop Stories\nJailed Russian opposition leader Alexei Navalny is dead, says prison service\nTaylor Swift puts an Aussie twist on a classic as she packs the MCG for the biggest show of her career — as it happened\nMelbourne comes alive with Swifties, as even those without tickets turn up to soak in the atmosphere\nAustralian Border Force investigates after arrival of more than 20 men by boat north of Broome\nOpenAI launches video model that can instantly create short clips from text prompts\nJust In\nJailed Russian opposition leader Alexei Navalny is dead, says prison service\nMelbourne comes alive with Swifties, as even those without tickets turn up to soak in the atmosphere\nTraveller alert after one-year-old in Adelaide reported with measles\nAntoinette Lattouf loses bid to force ABC to produce emails calling for her dismissal\nFooter\nWe acknowledge Aboriginal and Torres Strait Islander peoples as the First Australians and Traditional Custodians of the lands where we live, learn, and work.\n Increased coral cover could come at a cost\nThe rapid growth in coral cover appears to have come at the expense of the diversity of coral on the reef, with most of the increases accounted for by fast-growing branching coral called Acropora.\n Documents obtained by the ABC under Freedom of Information laws revealed the Morrison government had forced AIMS to rush the report\'s release and orchestrated a "leak" of the material to select media outlets ahead of the reef being considered for inclusion on the World Heritage In Danger list.\n The reef\'s status and potential inclusion on the In Danger list were due to be discussed at the 45th session of the World Heritage Committee in Russia in June this year, but the meeting was indefinitely postponed due to the war in Ukraine.\n More from ABC\nEditorial Policies\nGreat Barrier Reef coral cover at record levels after mass-bleaching events, report shows\nGreat Barrier Reef coral cover at record levels after mass-bleaching events, report shows\nRecord coral cover is being seen across much of the Great Barrier Reef as it recovers from past storms and mass-bleaching events.'}]The Great Barrier Reef is currently showing signs of recovery, with record coral cover being seen across much of the reef. This recovery comes after past storms and mass-bleaching events. However, the rapid growth in coral cover appears to have come at the expense of the diversity of coral on the reef, with most of the increases accounted for by fast-growing branching coral called Acropora. There were discussions about the reef's potential inclusion on the World Heritage In Danger list, but the meeting to consider this was indefinitely postponed due to the war in Ukraine.You can read more about it in this article: [Great Barrier Reef hit with widespread and severe bleaching event](https://www.abc.net.au/news/2022-08-04/great-barrier-reef-report-says-coral-recovering-after-bleaching/101296186)> Finished chain. 
{'messages': [HumanMessage(content='What is the current conservation status of the Great Barrier Reef?')], 'output': "The Great Barrier Reef is currently showing signs of recovery, with record coral cover being seen across much of the reef. This recovery comes after past storms and mass-bleaching events. However, the rapid growth in coral cover appears to have come at the expense of the diversity of coral on the reef, with most of the increases accounted for by fast-growing branching coral called Acropora. There were discussions about the reef's potential inclusion on the World Heritage In Danger list, but the meeting to consider this was indefinitely postponed due to the war in Ukraine.\n\nYou can read more about it in this article: [Great Barrier Reef hit with widespread and severe bleaching event](https://www.abc.net.au/news/2022-08-04/great-barrier-reef-report-says-coral-recovering-after-bleaching/101296186)"} Conversational responses[​](#conversational-responses "Direct link to Conversational responses") ------------------------------------------------------------------------------------------------ Because our prompt contains a placeholder for chat history messages, our agent can also take previous interactions into account and respond conversationally like a standard chatbot: from langchain_core.messages import AIMessage, HumanMessageagent_executor.invoke( { "messages": [ HumanMessage(content="I'm Nemo!"), AIMessage(content="Hello Nemo! How can I assist you today?"), HumanMessage(content="What is my name?"), ], }) **API Reference:**[AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) > Entering new AgentExecutor chain...Your name is Nemo!> Finished chain. {'messages': [HumanMessage(content="I'm Nemo!"), AIMessage(content='Hello Nemo! How can I assist you today?'), HumanMessage(content='What is my name?')], 'output': 'Your name is Nemo!'} If preferred, you can also wrap the agent executor in a [`RunnableWithMessageHistory`](/v0.2/docs/how_to/message_history/) class to internally manage history messages. Let's redeclare it this way: agent = create_tool_calling_agent(chat, tools, prompt)agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True) Then, because our agent executor has multiple outputs, we also have to set the `output_messages_key` property when initializing the wrapper: from langchain_community.chat_message_histories import ChatMessageHistoryfrom langchain_core.runnables.history import RunnableWithMessageHistorydemo_ephemeral_chat_history_for_chain = ChatMessageHistory()conversational_agent_executor = RunnableWithMessageHistory( agent_executor, lambda session_id: demo_ephemeral_chat_history_for_chain, input_messages_key="messages", output_messages_key="output",)conversational_agent_executor.invoke( {"messages": [HumanMessage("I'm Nemo!")]}, {"configurable": {"session_id": "unused"}},) **API Reference:**[ChatMessageHistory](https://api.python.langchain.com/en/latest/chat_history/langchain_core.chat_history.ChatMessageHistory.html) | [RunnableWithMessageHistory](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) > Entering new AgentExecutor chain...Hi Nemo! It's great to meet you. How can I assist you today?> Finished chain. {'messages': [HumanMessage(content="I'm Nemo!")], 'output': "Hi Nemo! It's great to meet you. 
How can I assist you today?"}

And then if we rerun our wrapped agent executor:

```python
conversational_agent_executor.invoke(
    {"messages": [HumanMessage("What is my name?")]},
    {"configurable": {"session_id": "unused"}},
)
```

```
> Entering new AgentExecutor chain...
Your name is Nemo! How can I assist you today, Nemo?
> Finished chain.
```

```
{'messages': [HumanMessage(content="I'm Nemo!"), AIMessage(content="Hi Nemo! It's great to meet you. How can I assist you today?"), HumanMessage(content='What is my name?')], 'output': 'Your name is Nemo! How can I assist you today, Nemo?'}
```

This [LangSmith trace](https://smith.langchain.com/public/1a9f712a-7918-4661-b3ff-d979bcc2af42/r) shows what's going on under the hood. A per-session variant of this wrapper is sketched below.

Further reading[​](#further-reading "Direct link to Further reading")
---------------------------------------------------------------------

Other types of agents also support conversational responses - for more, check out the [agents section](/v0.2/docs/tutorials/agents/).

For more on tool usage, you can also check out [this use case section](/v0.2/docs/how_to/#tools).
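Because the example above reuses a single in-memory history object for every session, all conversations share the same messages. Here is a minimal, hypothetical sketch (not part of the original guide) of keeping one history per `session_id` instead; the `store` dict and `get_session_history` helper are illustrative names, and the sketch assumes the `agent_executor` defined earlier on this page:

```python
# Illustrative sketch: one ChatMessageHistory per session_id (assumes the
# agent_executor from above is already defined).
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import HumanMessage
from langchain_core.runnables.history import RunnableWithMessageHistory

store: dict[str, ChatMessageHistory] = {}


def get_session_history(session_id: str) -> BaseChatMessageHistory:
    # Create a history the first time a session_id is seen, then reuse it.
    if session_id not in store:
        store[session_id] = ChatMessageHistory()
    return store[session_id]


conversational_agent_executor = RunnableWithMessageHistory(
    agent_executor,
    get_session_history,
    input_messages_key="messages",
    output_messages_key="output",
)

# Each session_id now gets its own, isolated chat history:
conversational_agent_executor.invoke(
    {"messages": [HumanMessage("I'm Nemo!")]},
    {"configurable": {"session_id": "session-1"}},
)
```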
https://python.langchain.com/v0.2/docs/versions/v0_2/
LangChain v0.2
==============

LangChain v0.2 was released in May 2024. This release includes a number of [breaking changes and deprecations](/v0.2/docs/versions/v0_2/deprecations/). This document contains a guide on upgrading to 0.2.x.

Reference

* [Breaking Changes & Deprecations](/v0.2/docs/versions/v0_2/deprecations/)
* [Migrating to Astream Events v2](/v0.2/docs/versions/v0_2/migrating_astream_events/)

Migration
=========

This documentation will help you upgrade your code to LangChain `0.2.x`. To prepare for migration, we first recommend you take the following steps:

1. Install the 0.2.x versions of langchain-core and langchain, and upgrade to recent versions of other packages that you may be using (e.g., langgraph, langchain-community, langchain-openai, etc.).
2. Verify that your code runs properly with the new packages (e.g., unit tests pass).
3. Install a recent version of `langchain-cli`, and use the tool to replace old imports used by your code with the new imports. (See instructions below.)
4. Manually resolve any remaining deprecation warnings (see the short sketch at the end of this page).
5. Re-run unit tests.
6. If you are using `astream_events`, please review how to [migrate to astream events v2](/v0.2/docs/versions/v0_2/migrating_astream_events/).

Upgrade to new imports[​](#upgrade-to-new-imports "Direct link to Upgrade to new imports")
------------------------------------------------------------------------------------------

We created a tool to help migrate your code. This tool is still in **beta** and may not cover all cases, but we hope that it will help you migrate your code more quickly.

The migration script has the following limitations:

1. It's limited to helping users move from old imports to new imports. It does not help address other deprecations.
2. It can't handle imports that involve `as`.
3. New imports are always placed in global scope, even if the old import that was replaced was located inside some local scope (e.g., a function body).
4. It will likely miss some deprecated imports.

Here is an example of the import changes that the migration script can help apply automatically:

| From Package | To Package | Deprecated Import | New Import |
| --- | --- | --- | --- |
| langchain | langchain-community | `from langchain.vectorstores import InMemoryVectorStore` | `from langchain_community.vectorstores import InMemoryVectorStore` |
| langchain-community | langchain-openai | `from langchain_community.chat_models import ChatOpenAI` | `from langchain_openai import ChatOpenAI` |
| langchain-community | langchain-core | `from langchain_community.document_loaders import Blob` | `from langchain_core.document_loaders import Blob` |
| langchain | langchain-core | `from langchain.schema.document import Document` | `from langchain_core.documents import Document` |
| langchain | langchain-text-splitters | `from langchain.text_splitter import RecursiveCharacterTextSplitter` | `from langchain_text_splitters import RecursiveCharacterTextSplitter` |

Installation[​](#installation "Direct link to Installation")
------------------------------------------------------------

```bash
pip install langchain-cli
langchain-cli --version # <-- Make sure the version is at least 0.0.22
```

Usage[​](#usage "Direct link to Usage")
---------------------------------------

Given that the migration script is not perfect, you should make sure you have a backup of your code first (e.g., using version control like `git`).

You will need to run the migration script **twice** as it only applies one import replacement per run.
For example, say your code still uses `from langchain.chat_models import ChatOpenAI`:

After the first run, you'll get: `from langchain_community.chat_models import ChatOpenAI`

After the second run, you'll get: `from langchain_openai import ChatOpenAI`

```bash
# Run a first time
# Will replace from langchain.chat_models import ChatOpenAI
langchain-cli migrate --diff [path to code] # Preview
langchain-cli migrate [path to code]        # Apply

# Run a second time to apply more import replacements
langchain-cli migrate --diff [path to code] # Preview
langchain-cli migrate [path to code]        # Apply
```

### Other options[​](#other-options "Direct link to Other options")

```bash
# See help menu
langchain-cli migrate --help
# Preview Changes without applying
langchain-cli migrate --diff [path to code]
# Run on code including ipython notebooks
# Apply all import updates except for updates from langchain to langchain-core
langchain-cli migrate --disable langchain_to_core --include-ipynb [path to code]
```
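Step 4 of the checklist above asks you to resolve any deprecation warnings the script could not rewrite. As a minimal, hypothetical sketch of one way to surface them (this is not part of the official guide, and the exact import location of `LangChainDeprecationWarning` is an assumption), you can promote LangChain's deprecation warnings to errors while running your test suite:

```python
# Hypothetical helper for your test setup (e.g., a conftest.py): turn any
# remaining LangChain deprecation warnings into hard errors so that old
# imports the migration script missed fail loudly instead of warning quietly.
import warnings

# Assumed import path for the warning class; adjust if your version differs.
from langchain_core._api import LangChainDeprecationWarning

warnings.simplefilter("error", LangChainDeprecationWarning)

# After this point, a leftover deprecated import such as
#   from langchain.document_loaders import UnstructuredMarkdownLoader
# would raise instead of printing a warning, making it easy to spot.
```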
https://python.langchain.com/v0.2/docs/versions/v0_2/migrating_astream_events/
Migrating to Astream Events v2
==============================

danger

This migration guide is a work in progress and is not complete. Please wait to migrate astream\_events.

We've added a `v2` of the astream\_events API with the release of `0.2.0`. You can see this [PR](https://github.com/langchain-ai/langchain/pull/21638) for more details.

The `v2` version is a re-write of the `v1` version, and should be more efficient, with more consistent output for the events. The `v1` version of the API will be deprecated in favor of the `v2` version and will be removed in `0.4.0`.

Below is a list of changes between the `v1` and `v2` versions of the API.

### output for `on_chat_model_end`[​](#output-for-on_chat_model_end "Direct link to output-for-on_chat_model_end")

In `v1`, the outputs associated with `on_chat_model_end` changed depending on whether the chat model was run as a root level runnable or as part of a chain.

As a root level runnable the output was:

```python
"data": {"output": AIMessageChunk(content="hello world!", id='some id')}
```

As part of a chain the output was:

```python
"data": {
    "output": {
        "generations": [
            [
                {
                    "generation_info": None,
                    "message": AIMessageChunk(
                        content="hello world!", id=AnyStr()
                    ),
                    "text": "hello world!",
                    "type": "ChatGenerationChunk",
                }
            ]
        ],
        "llm_output": None,
    }
},
```

As of `v2`, the output will always be the simpler representation:

```python
"data": {"output": AIMessageChunk(content="hello world!", id='some id')}
```

note

Non-chat models (i.e., regular LLMs) will be consistently associated with the more verbose format for now.

### output for `on_retriever_end`[​](#output-for-on_retriever_end "Direct link to output-for-on_retriever_end")

`on_retriever_end` output will always return a list of `Documents`.

Before:

```python
{
    "data": {
        "output": [
            Document(...),
            Document(...),
            ...
        ]
    }
}
```

### Removed `on_retriever_stream`[​](#removed-on_retriever_stream "Direct link to removed-on_retriever_stream")

The `on_retriever_stream` event was an artifact of the implementation and has been removed. Full information associated with the event is already available in the `on_retriever_end` event. Please use `on_retriever_end` instead.

### Removed `on_tool_stream`[​](#removed-on_tool_stream "Direct link to removed-on_tool_stream")

The `on_tool_stream` event was an artifact of the implementation and has been removed. Full information associated with the event is already available in the `on_tool_end` event. Please use `on_tool_end` instead.

### Propagating Names[​](#propagating-names "Direct link to Propagating Names")

Names of runnables have been updated to be more consistent.

```python
model = GenericFakeChatModel(messages=infinite_cycle).configurable_fields(
    messages=ConfigurableField(
        id="messages",
        name="Messages",
        description="Messages returned by the LLM",
    )
)
```

In `v1`, the event name was `RunnableConfigurableFields`.

In `v2`, the event name is `GenericFakeChatModel`.

If you're filtering by event names, check if you need to update your filters.

### RunnableRetry[​](#runnableretry "Direct link to RunnableRetry")

Usage of [RunnableRetry](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.retry.RunnableRetry.html) within an LCEL chain being streamed generated an incorrect `on_chain_end` event in `v1` corresponding to the failed runnable invocation that was being retried. This event has been removed in `v2`.

No action is required for this change.
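As a quick illustration of consuming the `v2` API, here is a minimal, hypothetical sketch (not from the original guide). It assumes you have any chat model available; `ChatOpenAI` and the `gpt-3.5-turbo` model name are used here purely as examples:

```python
# Minimal sketch of iterating over astream_events with version="v2".
import asyncio

from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)


async def main() -> None:
    async for event in model.astream_events("hello", version="v2"):
        kind = event["event"]
        if kind == "on_chat_model_stream":
            # Token-by-token chunks as they arrive.
            print(event["data"]["chunk"].content, end="", flush=True)
        elif kind == "on_chat_model_end":
            # In v2 this is always the simple representation shown above:
            # {"output": AIMessageChunk(...)}
            print("\nfinal:", event["data"]["output"])


asyncio.run(main())
```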
https://python.langchain.com/v0.2/docs/how_to/code_splitter/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to split code On this page How to split code ================= [RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html) includes pre-built lists of separators that are useful for splitting text in a specific programming language. Supported languages are stored in the `langchain_text_splitters.Language` enum. They include: "cpp","go","java","kotlin","js","ts","php","proto","python","rst","ruby","rust","scala","swift","markdown","latex","html","sol","csharp","cobol","c","lua","perl","haskell" To view the list of separators for a given language, pass a value from this enum into RecursiveCharacterTextSplitter.get_separators_for_language` To instantiate a splitter that is tailored for a specific language, pass a value from the enum into RecursiveCharacterTextSplitter.from_language Below we demonstrate examples for the various languages. %pip install -qU langchain-text-splitters from langchain_text_splitters import ( Language, RecursiveCharacterTextSplitter,) **API Reference:**[Language](https://api.python.langchain.com/en/latest/base/langchain_text_splitters.base.Language.html) | [RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html) To view the full list of supported languages: [e.value for e in Language] ['cpp', 'go', 'java', 'kotlin', 'js', 'ts', 'php', 'proto', 'python', 'rst', 'ruby', 'rust', 'scala', 'swift', 'markdown', 'latex', 'html', 'sol', 'csharp', 'cobol', 'c', 'lua', 'perl', 'haskell'] You can also see the separators used for a given language: RecursiveCharacterTextSplitter.get_separators_for_language(Language.PYTHON) ['\nclass ', '\ndef ', '\n\tdef ', '\n\n', '\n', ' ', ''] Python[​](#python "Direct link to Python") ------------------------------------------ Here's an example using the PythonTextSplitter: PYTHON_CODE = """def hello_world(): print("Hello, World!")# Call the functionhello_world()"""python_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.PYTHON, chunk_size=50, chunk_overlap=0)python_docs = python_splitter.create_documents([PYTHON_CODE])python_docs [Document(page_content='def hello_world():\n print("Hello, World!")'), Document(page_content='# Call the function\nhello_world()')] JS[​](#js "Direct link to JS") ------------------------------ Here's an example using the JS text splitter: JS_CODE = """function helloWorld() { console.log("Hello, World!");}// Call the functionhelloWorld();"""js_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.JS, chunk_size=60, chunk_overlap=0)js_docs = js_splitter.create_documents([JS_CODE])js_docs [Document(page_content='function helloWorld() {\n console.log("Hello, World!");\n}'), Document(page_content='// Call the function\nhelloWorld();')] TS[​](#ts "Direct link to TS") ------------------------------ Here's an example using the TS text splitter: TS_CODE = """function helloWorld(): void { console.log("Hello, World!");}// Call the functionhelloWorld();"""ts_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.TS, chunk_size=60, chunk_overlap=0)ts_docs = ts_splitter.create_documents([TS_CODE])ts_docs [Document(page_content='function helloWorld(): void {'), Document(page_content='console.log("Hello, World!");\n}'), Document(page_content='// Call the function\nhelloWorld();')] Markdown[​](#markdown "Direct link 
to Markdown") ------------------------------------------------ Here's an example using the Markdown text splitter: markdown_text = """# 🦜️🔗 LangChain⚡ Building applications with LLMs through composability ⚡## Quick Install```bash# Hopefully this code block isn't splitpip install langchain As an open-source project in a rapidly developing field, we are extremely open to contributions. """ ```pythonmd_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.MARKDOWN, chunk_size=60, chunk_overlap=0)md_docs = md_splitter.create_documents([markdown_text])md_docs [Document(page_content='# 🦜️🔗 LangChain'), Document(page_content='⚡ Building applications with LLMs through composability ⚡'), Document(page_content='## Quick Install\n\n```bash'), Document(page_content="# Hopefully this code block isn't split"), Document(page_content='pip install langchain'), Document(page_content='```'), Document(page_content='As an open-source project in a rapidly developing field, we'), Document(page_content='are extremely open to contributions.')] Latex[​](#latex "Direct link to Latex") --------------------------------------- Here's an example on Latex text: latex_text = """\documentclass{article}\begin{document}\maketitle\section{Introduction}Large language models (LLMs) are a type of machine learning model that can be trained on vast amounts of text data to generate human-like language. In recent years, LLMs have made significant advances in a variety of natural language processing tasks, including language translation, text generation, and sentiment analysis.\subsection{History of LLMs}The earliest LLMs were developed in the 1980s and 1990s, but they were limited by the amount of data that could be processed and the computational power available at the time. In the past decade, however, advances in hardware and software have made it possible to train LLMs on massive datasets, leading to significant improvements in performance.\subsection{Applications of LLMs}LLMs have many applications in industry, including chatbots, content creation, and virtual assistants. They can also be used in academia for research in linguistics, psychology, and computational linguistics.\end{document}""" latex_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.MARKDOWN, chunk_size=60, chunk_overlap=0)latex_docs = latex_splitter.create_documents([latex_text])latex_docs [Document(page_content='\\documentclass{article}\n\n\x08egin{document}\n\n\\maketitle'), Document(page_content='\\section{Introduction}'), Document(page_content='Large language models (LLMs) are a type of machine learning'), Document(page_content='model that can be trained on vast amounts of text data to'), Document(page_content='generate human-like language. In recent years, LLMs have'), Document(page_content='made significant advances in a variety of natural language'), Document(page_content='processing tasks, including language translation, text'), Document(page_content='generation, and sentiment analysis.'), Document(page_content='\\subsection{History of LLMs}'), Document(page_content='The earliest LLMs were developed in the 1980s and 1990s,'), Document(page_content='but they were limited by the amount of data that could be'), Document(page_content='processed and the computational power available at the'), Document(page_content='time. 
In the past decade, however, advances in hardware and'), Document(page_content='software have made it possible to train LLMs on massive'), Document(page_content='datasets, leading to significant improvements in'), Document(page_content='performance.'), Document(page_content='\\subsection{Applications of LLMs}'), Document(page_content='LLMs have many applications in industry, including'), Document(page_content='chatbots, content creation, and virtual assistants. They'), Document(page_content='can also be used in academia for research in linguistics,'), Document(page_content='psychology, and computational linguistics.'), Document(page_content='\\end{document}')] HTML[​](#html "Direct link to HTML") ------------------------------------ Here's an example using an HTML text splitter: html_text = """<!DOCTYPE html><html> <head> <title>🦜️🔗 LangChain</title> <style> body { font-family: Arial, sans-serif; } h1 { color: darkblue; } </style> </head> <body> <div> <h1>🦜️🔗 LangChain</h1> <p>⚡ Building applications with LLMs through composability ⚡</p> </div> <div> As an open-source project in a rapidly developing field, we are extremely open to contributions. </div> </body></html>""" html_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.HTML, chunk_size=60, chunk_overlap=0)html_docs = html_splitter.create_documents([html_text])html_docs [Document(page_content='<!DOCTYPE html>\n<html>'), Document(page_content='<head>\n <title>🦜️🔗 LangChain</title>'), Document(page_content='<style>\n body {\n font-family: Aria'), Document(page_content='l, sans-serif;\n }\n h1 {'), Document(page_content='color: darkblue;\n }\n </style>\n </head'), Document(page_content='>'), Document(page_content='<body>'), Document(page_content='<div>\n <h1>🦜️🔗 LangChain</h1>'), Document(page_content='<p>⚡ Building applications with LLMs through composability ⚡'), Document(page_content='</p>\n </div>'), Document(page_content='<div>\n As an open-source project in a rapidly dev'), Document(page_content='eloping field, we are extremely open to contributions.'), Document(page_content='</div>\n </body>\n</html>')] Solidity[​](#solidity "Direct link to Solidity") ------------------------------------------------ Here's an example using the Solidity text splitter: SOL_CODE = """pragma solidity ^0.8.20;contract HelloWorld { function add(uint a, uint b) pure public returns(uint) { return a + b; }}"""sol_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.SOL, chunk_size=128, chunk_overlap=0)sol_docs = sol_splitter.create_documents([SOL_CODE])sol_docs [Document(page_content='pragma solidity ^0.8.20;'), Document(page_content='contract HelloWorld {\n function add(uint a, uint b) pure public returns(uint) {\n return a + b;\n }\n}')] C#[​](#c "Direct link to C#") ----------------------------- Here's an example using the C# text splitter: C_CODE = """using System;class Program{ static void Main() { int age = 30; // Change the age value as needed // Categorize the age without any console output if (age < 18) { // Age is under 18 } else if (age >= 18 && age < 65) { // Age is an adult } else { // Age is a senior citizen } }}"""c_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.CSHARP, chunk_size=128, chunk_overlap=0)c_docs = c_splitter.create_documents([C_CODE])c_docs [Document(page_content='using System;'), Document(page_content='class Program\n{\n static void Main()\n {\n int age = 30; // Change the age value as needed'), Document(page_content='// Categorize the age without any 
console output\n if (age < 18)\n {\n // Age is under 18'), Document(page_content='}\n else if (age >= 18 && age < 65)\n {\n // Age is an adult\n }\n else\n {'), Document(page_content='// Age is a senior citizen\n }\n }\n}')] Haskell[​](#haskell "Direct link to Haskell") --------------------------------------------- Here's an example using the Haskell text splitter: HASKELL_CODE = """main :: IO ()main = do putStrLn "Hello, World!"-- Some sample functionsadd :: Int -> Int -> Intadd x y = x + y"""haskell_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.HASKELL, chunk_size=50, chunk_overlap=0)haskell_docs = haskell_splitter.create_documents([HASKELL_CODE])haskell_docs [Document(page_content='main :: IO ()'), Document(page_content='main = do\n putStrLn "Hello, World!"\n-- Some'), Document(page_content='sample functions\nadd :: Int -> Int -> Int\nadd x y'), Document(page_content='= x + y')] PHP[​](#php "Direct link to PHP") --------------------------------- Here's an example using the PHP text splitter: PHP_CODE = """<?phpnamespace foo;class Hello { public function __construct() { }}function hello() { echo "Hello World!";}interface Human { public function breath();}trait Foo { }enum Color{ case Red; case Blue;}"""php_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.PHP, chunk_size=50, chunk_overlap=0)haskell_docs = php_splitter.create_documents([PHP_CODE])haskell_docs [Document(page_content='<?php\nnamespace foo;'), Document(page_content='class Hello {'), Document(page_content='public function __construct() { }\n}'), Document(page_content='function hello() {\n echo "Hello World!";\n}'), Document(page_content='interface Human {\n public function breath();\n}'), Document(page_content='trait Foo { }\nenum Color\n{\n case Red;'), Document(page_content='case Blue;\n}')] [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/code_splitter.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to add tools to chatbots ](/v0.2/docs/how_to/chatbots_tools/)[ Next How to do retrieval with contextual compression ](/v0.2/docs/how_to/contextual_compression/) * [Python](#python) * [JS](#js) * [TS](#ts) * [Markdown](#markdown) * [Latex](#latex) * [HTML](#html) * [Solidity](#solidity) * [C#](#c) * [Haskell](#haskell) * [PHP](#php)
https://python.langchain.com/v0.2/docs/how_to/query_high_cardinality/
How to deal with high cardinality categoricals when doing query analysis
=========================================================================

You may want to do query analysis to create a filter on a categorical column. One of the difficulties here is that you usually need to specify the EXACT categorical value. The issue is you need to make sure the LLM generates that categorical value exactly. This can be done relatively easily with prompting when there are only a few values that are valid. When there are a high number of valid values then it becomes more difficult, as those values may not fit in the LLM context, or (if they do) there may be too many for the LLM to properly attend to.

In this notebook we take a look at how to approach this.

Setup[​](#setup "Direct link to Setup")
---------------------------------------

#### Install dependencies[​](#install-dependencies "Direct link to Install dependencies")

```python
# %pip install -qU langchain langchain-community langchain-openai faker langchain-chroma
```

#### Set environment variables[​](#set-environment-variables "Direct link to Set environment variables")

We'll use OpenAI in this example:

```python
import getpass
import os

os.environ["OPENAI_API_KEY"] = getpass.getpass()

# Optional, uncomment to trace runs with LangSmith. Sign up here: https://smith.langchain.com.
# os.environ["LANGCHAIN_TRACING_V2"] = "true"
# os.environ["LANGCHAIN_API_KEY"] = getpass.getpass()
```

#### Set up data[​](#set-up-data "Direct link to Set up data")

We will generate a bunch of fake names:

```python
from faker import Faker

fake = Faker()
names = [fake.name() for _ in range(10000)]
```

Let's look at some of the names:

```python
names[0]
```

```
'Hayley Gonzalez'
```

```python
names[567]
```

```
'Jesse Knight'
```

Query Analysis[​](#query-analysis "Direct link to Query Analysis")
------------------------------------------------------------------

We can now set up a baseline query analysis:

```python
from langchain_core.pydantic_v1 import BaseModel, Field


class Search(BaseModel):
    query: str
    author: str
```

```python
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI

system = """Generate a relevant search query for a library system"""
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system),
        ("human", "{question}"),
    ]
)
llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
structured_llm = llm.with_structured_output(Search)
query_analyzer = {"question": RunnablePassthrough()} | prompt | structured_llm
```

**API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)

```
/Users/harrisonchase/workplace/langchain/libs/core/langchain_core/_api/beta_decorator.py:86: LangChainBetaWarning: The function `with_structured_output` is in beta. It is actively being worked on, so the API may change.
  warn_beta(
```
We can see that if we spell the name exactly correctly, it knows how to handle it:

```python
query_analyzer.invoke("what are books about aliens by Jesse Knight")
```

```
Search(query='books about aliens', author='Jesse Knight')
```

The issue is that the values you want to filter on may NOT be spelled exactly correctly:

```python
query_analyzer.invoke("what are books about aliens by jess knight")
```

```
Search(query='books about aliens', author='Jess Knight')
```

### Add in all values[​](#add-in-all-values "Direct link to Add in all values")

One way around this is to add ALL possible values to the prompt. That will generally guide the query in the right direction:

```python
system = """Generate a relevant search query for a library system.

`author` attribute MUST be one of:

{authors}

Do NOT hallucinate author name!"""
base_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system),
        ("human", "{question}"),
    ]
)
prompt = base_prompt.partial(authors=", ".join(names))
```

```python
query_analyzer_all = {"question": RunnablePassthrough()} | prompt | structured_llm
```

However... if the list of categoricals is long enough, it may error!

```python
try:
    res = query_analyzer_all.invoke("what are books about aliens by jess knight")
except Exception as e:
    print(e)
```

```
Error code: 400 - {'error': {'message': "This model's maximum context length is 16385 tokens. However, your messages resulted in 33885 tokens (33855 in the messages, 30 in the functions). Please reduce the length of the messages or functions.", 'type': 'invalid_request_error', 'param': 'messages', 'code': 'context_length_exceeded'}}
```

We can try to use a longer context window... but with so much information in there, it is not guaranteed to pick it up reliably:

```python
llm_long = ChatOpenAI(model="gpt-4-turbo-preview", temperature=0)
structured_llm_long = llm_long.with_structured_output(Search)
query_analyzer_all = {"question": RunnablePassthrough()} | prompt | structured_llm_long
```

```python
query_analyzer_all.invoke("what are books about aliens by jess knight")
```

```
Search(query='aliens', author='Kevin Knight')
```

### Find all relevant values[​](#find-and-all-relevant-values "Direct link to Find and all relevant values")

Instead, what we can do is create an index over the relevant values and then query that for the N most relevant values:

```python
from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings

embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
vectorstore = Chroma.from_texts(names, embeddings, collection_name="author_names")
```

**API Reference:**[OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html)

```python
def select_names(question):
    _docs = vectorstore.similarity_search(question, k=10)
    _names = [d.page_content for d in _docs]
    return ", ".join(_names)
```

```python
create_prompt = {
    "question": RunnablePassthrough(),
    "authors": select_names,
} | base_prompt
```

```python
query_analyzer_select = create_prompt | structured_llm
```

```python
create_prompt.invoke("what are books by jess knight")
```

```
ChatPromptValue(messages=[SystemMessage(content='Generate a relevant search query for a library system.\n\n`author` attribute MUST be one of:\n\nJesse Knight, Kelly Knight, Scott Knight, Richard Knight, Andrew Knight, Katherine Knight, Erica Knight, Ashley Knight, Becky Knight, Kevin Knight\n\nDo NOT hallucinate author name!'), HumanMessage(content='what are books by jess knight')])
```

```python
query_analyzer_select.invoke("what are books about aliens by jess knight")
```

```
Search(query='books about aliens', author='Jesse Knight')
```
selection") Another method is to let the LLM fill in whatever value, but then convert that value to a valid value. This can actually be done with the Pydantic class itself! from langchain_core.pydantic_v1 import validatorclass Search(BaseModel): query: str author: str @validator("author") def double(cls, v: str) -> str: return vectorstore.similarity_search(v, k=1)[0].page_content system = """Generate a relevant search query for a library system"""prompt = ChatPromptTemplate.from_messages( [ ("system", system), ("human", "{question}"), ])corrective_structure_llm = llm.with_structured_output(Search)corrective_query_analyzer = ( {"question": RunnablePassthrough()} | prompt | corrective_structure_llm) corrective_query_analyzer.invoke("what are books about aliens by jes knight") Search(query='books about aliens', author='Jesse Knight') # TODO: show trigram similarity [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/query_high_cardinality.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to configure runtime chain internals ](/v0.2/docs/how_to/configure/)[ Next Custom Document Loader ](/v0.2/docs/how_to/document_loader_custom/) * [Setup](#setup) * [Query Analysis](#query-analysis) * [Add in all values](#add-in-all-values) * [Find and all relevant values](#find-and-all-relevant-values) * [Replace after selection](#replace-after-selection)
https://python.langchain.com/v0.2/docs/versions/v0_2/deprecations/
Deprecations and Breaking Changes
=================================

This page contains a list of deprecations and removals in the `langchain` and `langchain-core` packages.

New features and improvements are not listed here. See the [overview](/v0.2/docs/versions/overview/) for a summary of what's new in this release.

Breaking changes[​](#breaking-changes "Direct link to Breaking changes")
------------------------------------------------------------------------

As of release 0.2.0, `langchain` is required to be integration-agnostic. This means that code in `langchain` should not by default instantiate any specific chat models, LLMs, embedding models, vectorstores, etc.; instead, the user will be required to specify those explicitly.

The following functions and classes require an explicit LLM to be passed as an argument:

* `langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreToolkit`
* `langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreRouterToolkit`
* `langchain.chains.openai_functions.get_openapi_chain`
* `langchain.chains.router.MultiRetrievalQAChain.from_retrievers`
* `langchain.indexes.VectorStoreIndexWrapper.query`
* `langchain.indexes.VectorStoreIndexWrapper.query_with_sources`
* `langchain.indexes.VectorStoreIndexWrapper.aquery_with_sources`
* `langchain.chains.flare.FlareChain`

The following classes now require passing an explicit Embedding model as an argument:

* `langchain.indexes.VectorstoreIndexCreator`

The following code has been removed:

* `langchain.natbot.NatBotChain.from_default` removed in favor of the `from_llm` class method.

Behavior was changed for the following code:

### @tool decorator[​](#tool-decorator "Direct link to @tool decorator")

The `@tool` decorator now assigns the function doc-string as the tool description. Previously, the `@tool` decorator used to prepend the function signature (see the short example below, after the notes on moved code).

Before 0.2.0:

```python
@tool
def my_tool(x: str) -> str:
    """Some description."""
    return "something"


print(my_tool.description)
```

Would result in: `my_tool: (x: str) -> str - Some description.`

As of 0.2.0:

It will result in: `Some description.`

Code that moved to another package[​](#code-that-moved-to-another-package "Direct link to Code that moved to another package")
------------------------------------------------------------------------------------------------------------------------------

Code that was moved from `langchain` into another package (e.g., `langchain-community`) can still be imported from `langchain`: the import will keep on working, but will raise a deprecation warning. The warning will provide a replacement import statement.

```bash
python -c "from langchain.document_loaders.markdown import UnstructuredMarkdownLoader"
```

```
LangChainDeprecationWarning: Importing UnstructuredMarkdownLoader from langchain.document_loaders is deprecated. Please replace deprecated imports:

>> from langchain.document_loaders import UnstructuredMarkdownLoader

with new imports of:

>> from langchain_community.document_loaders import UnstructuredMarkdownLoader
```

We will continue supporting the imports in `langchain` until release 0.4 as long as the relevant package where the code lives is installed (e.g., as long as `langchain_community` is installed).

However, we advise users not to rely on these imports and instead migrate to the new imports. To help with this process, we're releasing a migration script via the LangChain CLI. See further instructions in the migration guide.
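Returning to the `@tool` behavior change described above, here is a small, hypothetical example of the new behavior (the `multiply` tool below is illustrative and assumes `langchain-core` 0.2.x is installed):

```python
# Illustrative example of the 0.2 behavior: the tool's description is taken
# from the docstring, not from the function signature.
from langchain_core.tools import tool


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers and return the product."""
    return a * b


print(multiply.name)         # -> "multiply"
print(multiply.description)  # Under 0.2.x: "Multiply two integers and return the product."
```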
Code targeted for removal[​](#code-targeted-for-removal "Direct link to Code targeted for removal") --------------------------------------------------------------------------------------------------- Code that has better alternatives available and will eventually be removed, so there’s only a single way to do things. (e.g., `predict_messages` method in ChatModels has been deprecated in favor of `invoke`). ### astream events V1[​](#astream-events-v1 "Direct link to astream events V1") If you are using `astream_events`, please review how to [migrate to astream events v2](/v0.2/docs/versions/v0_2/migrating_astream_events/). ### langchain\_core[​](#langchain_core "Direct link to langchain_core") #### try\_load\_from\_hub[​](#try_load_from_hub "Direct link to try_load_from_hub") In module: `utils.loading` Deprecated: 0.1.30 Removal: 0.3.0 Alternative: Using the hwchase17/langchain-hub repo for prompts is deprecated. Please use [https://smith.langchain.com/hub](https://smith.langchain.com/hub) instead. #### BaseLanguageModel.predict[​](#baselanguagemodelpredict "Direct link to BaseLanguageModel.predict") In module: `language_models.base` Deprecated: 0.1.7 Removal: 0.3.0 Alternative: invoke #### BaseLanguageModel.predict\_messages[​](#baselanguagemodelpredict_messages "Direct link to BaseLanguageModel.predict_messages") In module: `language_models.base` Deprecated: 0.1.7 Removal: 0.3.0 Alternative: invoke #### BaseLanguageModel.apredict[​](#baselanguagemodelapredict "Direct link to BaseLanguageModel.apredict") In module: `language_models.base` Deprecated: 0.1.7 Removal: 0.3.0 Alternative: ainvoke #### BaseLanguageModel.apredict\_messages[​](#baselanguagemodelapredict_messages "Direct link to BaseLanguageModel.apredict_messages") In module: `language_models.base` Deprecated: 0.1.7 Removal: 0.3.0 Alternative: ainvoke #### RunTypeEnum[​](#runtypeenum "Direct link to RunTypeEnum") In module: `tracers.schemas` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: Use string instead. 
#### TracerSessionV1Base[​](#tracersessionv1base "Direct link to TracerSessionV1Base") In module: `tracers.schemas` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: #### TracerSessionV1Create[​](#tracersessionv1create "Direct link to TracerSessionV1Create") In module: `tracers.schemas` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: #### TracerSessionV1[​](#tracersessionv1 "Direct link to TracerSessionV1") In module: `tracers.schemas` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: #### TracerSessionBase[​](#tracersessionbase "Direct link to TracerSessionBase") In module: `tracers.schemas` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: #### TracerSession[​](#tracersession "Direct link to TracerSession") In module: `tracers.schemas` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: #### BaseRun[​](#baserun "Direct link to BaseRun") In module: `tracers.schemas` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: Run #### LLMRun[​](#llmrun "Direct link to LLMRun") In module: `tracers.schemas` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: Run #### ChainRun[​](#chainrun "Direct link to ChainRun") In module: `tracers.schemas` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: Run #### ToolRun[​](#toolrun "Direct link to ToolRun") In module: `tracers.schemas` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: Run #### BaseChatModel.**call**[​](#basechatmodelcall "Direct link to basechatmodelcall") In module: `language_models.chat_models` Deprecated: 0.1.7 Removal: 0.3.0 Alternative: invoke #### BaseChatModel.call\_as\_llm[​](#basechatmodelcall_as_llm "Direct link to BaseChatModel.call_as_llm") In module: `language_models.chat_models` Deprecated: 0.1.7 Removal: 0.3.0 Alternative: invoke #### BaseChatModel.predict[​](#basechatmodelpredict "Direct link to BaseChatModel.predict") In module: `language_models.chat_models` Deprecated: 0.1.7 Removal: 0.3.0 Alternative: invoke #### BaseChatModel.predict\_messages[​](#basechatmodelpredict_messages "Direct link to BaseChatModel.predict_messages") In module: `language_models.chat_models` Deprecated: 0.1.7 Removal: 0.3.0 Alternative: invoke #### BaseChatModel.apredict[​](#basechatmodelapredict "Direct link to BaseChatModel.apredict") In module: `language_models.chat_models` Deprecated: 0.1.7 Removal: 0.3.0 Alternative: ainvoke #### BaseChatModel.apredict\_messages[​](#basechatmodelapredict_messages "Direct link to BaseChatModel.apredict_messages") In module: `language_models.chat_models` Deprecated: 0.1.7 Removal: 0.3.0 Alternative: ainvoke #### BaseLLM.**call**[​](#basellmcall "Direct link to basellmcall") In module: `language_models.llms` Deprecated: 0.1.7 Removal: 0.3.0 Alternative: invoke #### BaseLLM.predict[​](#basellmpredict "Direct link to BaseLLM.predict") In module: `language_models.llms` Deprecated: 0.1.7 Removal: 0.3.0 Alternative: invoke #### BaseLLM.predict\_messages[​](#basellmpredict_messages "Direct link to BaseLLM.predict_messages") In module: `language_models.llms` Deprecated: 0.1.7 Removal: 0.3.0 Alternative: invoke #### BaseLLM.apredict[​](#basellmapredict "Direct link to BaseLLM.apredict") In module: `language_models.llms` Deprecated: 0.1.7 Removal: 0.3.0 Alternative: ainvoke #### BaseLLM.apredict\_messages[​](#basellmapredict_messages "Direct link to BaseLLM.apredict_messages") In module: `language_models.llms` Deprecated: 0.1.7 Removal: 0.3.0 Alternative: ainvoke #### BaseRetriever.get\_relevant\_documents[​](#baseretrieverget_relevant_documents "Direct link to BaseRetriever.get_relevant_documents") In module: `retrievers` Deprecated: 0.1.46 Removal: 0.3.0 
Alternative: invoke #### BaseRetriever.aget\_relevant\_documents[​](#baseretrieveraget_relevant_documents "Direct link to BaseRetriever.aget_relevant_documents") In module: `retrievers` Deprecated: 0.1.46 Removal: 0.3.0 Alternative: ainvoke #### ChatPromptTemplate.from\_role\_strings[​](#chatprompttemplatefrom_role_strings "Direct link to ChatPromptTemplate.from_role_strings") In module: `prompts.chat` Deprecated: 0.0.1 Removal: Alternative: from\_messages classmethod #### ChatPromptTemplate.from\_strings[​](#chatprompttemplatefrom_strings "Direct link to ChatPromptTemplate.from_strings") In module: `prompts.chat` Deprecated: 0.0.1 Removal: Alternative: from\_messages classmethod #### BaseTool.**call**[​](#basetoolcall "Direct link to basetoolcall") In module: `tools` Deprecated: 0.1.47 Removal: 0.3.0 Alternative: invoke #### convert\_pydantic\_to\_openai\_function[​](#convert_pydantic_to_openai_function "Direct link to convert_pydantic_to_openai_function") In module: `utils.function_calling` Deprecated: 0.1.16 Removal: 0.3.0 Alternative: langchain\_core.utils.function\_calling.convert\_to\_openai\_function() #### convert\_pydantic\_to\_openai\_tool[​](#convert_pydantic_to_openai_tool "Direct link to convert_pydantic_to_openai_tool") In module: `utils.function_calling` Deprecated: 0.1.16 Removal: 0.3.0 Alternative: langchain\_core.utils.function\_calling.convert\_to\_openai\_tool() #### convert\_python\_function\_to\_openai\_function[​](#convert_python_function_to_openai_function "Direct link to convert_python_function_to_openai_function") In module: `utils.function_calling` Deprecated: 0.1.16 Removal: 0.3.0 Alternative: langchain\_core.utils.function\_calling.convert\_to\_openai\_function() #### format\_tool\_to\_openai\_function[​](#format_tool_to_openai_function "Direct link to format_tool_to_openai_function") In module: `utils.function_calling` Deprecated: 0.1.16 Removal: 0.3.0 Alternative: langchain\_core.utils.function\_calling.convert\_to\_openai\_function() #### format\_tool\_to\_openai\_tool[​](#format_tool_to_openai_tool "Direct link to format_tool_to_openai_tool") In module: `utils.function_calling` Deprecated: 0.1.16 Removal: 0.3.0 Alternative: langchain\_core.utils.function\_calling.convert\_to\_openai\_tool() ### langchain[​](#langchain "Direct link to langchain") #### AgentType[​](#agenttype "Direct link to AgentType") In module: `agents.agent_types` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: Use [LangGraph](/v0.2/docs/how_to/migrate_agent/) or new agent constructor methods like create\_react\_agent, create\_json\_agent, create\_structured\_chat\_agent, etc. 
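The LangGraph alternative referenced in the `AgentType` entry above (and in several of the agent entries below) looks roughly like the following sketch. The model, tool, and question are placeholders, and `langgraph` must be installed separately; see the linked migration guide for the authoritative version.

```python
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent


@tool
def get_word_length(word: str) -> int:
    """Return the number of characters in a word."""
    return len(word)


model = ChatOpenAI(model="gpt-4o-mini")  # placeholder model
agent = create_react_agent(model, [get_word_length])

# The compiled graph takes a messages payload instead of a plain input string.
result = agent.invoke({"messages": [("human", "How many letters are in 'deprecation'?")]})
print(result["messages"][-1].content)
```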
#### Chain.**call**[​](#chaincall "Direct link to chaincall") In module: `chains.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: invoke #### Chain.acall[​](#chainacall "Direct link to Chain.acall") In module: `chains.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: ainvoke #### Chain.run[​](#chainrun-1 "Direct link to Chain.run") In module: `chains.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: invoke #### Chain.arun[​](#chainarun "Direct link to Chain.arun") In module: `chains.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: ainvoke #### Chain.apply[​](#chainapply "Direct link to Chain.apply") In module: `chains.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: batch #### LLMChain[​](#llmchain "Direct link to LLMChain") In module: `chains.llm` Deprecated: 0.1.17 Removal: 0.3.0 Alternative: [RunnableSequence](/v0.2/docs/how_to/sequence/), e.g., `prompt | llm` #### LLMSingleActionAgent[​](#llmsingleactionagent "Direct link to LLMSingleActionAgent") In module: `agents.agent` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: Use [LangGraph](/v0.2/docs/how_to/migrate_agent/) or new agent constructor methods like create\_react\_agent, create\_json\_agent, create\_structured\_chat\_agent, etc. #### Agent[​](#agent "Direct link to Agent") In module: `agents.agent` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: Use [LangGraph](/v0.2/docs/how_to/migrate_agent/) or new agent constructor methods like create\_react\_agent, create\_json\_agent, create\_structured\_chat\_agent, etc. #### OpenAIFunctionsAgent[​](#openaifunctionsagent "Direct link to OpenAIFunctionsAgent") In module: `agents.openai_functions_agent.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: create\_openai\_functions\_agent #### ZeroShotAgent[​](#zeroshotagent "Direct link to ZeroShotAgent") In module: `agents.mrkl.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: create\_react\_agent #### MRKLChain[​](#mrklchain "Direct link to MRKLChain") In module: `agents.mrkl.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: #### ConversationalAgent[​](#conversationalagent "Direct link to ConversationalAgent") In module: `agents.conversational.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: create\_react\_agent #### ConversationalChatAgent[​](#conversationalchatagent "Direct link to ConversationalChatAgent") In module: `agents.conversational_chat.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: create\_json\_chat\_agent #### ChatAgent[​](#chatagent "Direct link to ChatAgent") In module: `agents.chat.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: create\_react\_agent #### OpenAIMultiFunctionsAgent[​](#openaimultifunctionsagent "Direct link to OpenAIMultiFunctionsAgent") In module: `agents.openai_functions_multi_agent.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: create\_openai\_tools\_agent #### ReActDocstoreAgent[​](#reactdocstoreagent "Direct link to ReActDocstoreAgent") In module: `agents.react.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: #### DocstoreExplorer[​](#docstoreexplorer "Direct link to DocstoreExplorer") In module: `agents.react.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: #### ReActTextWorldAgent[​](#reacttextworldagent "Direct link to ReActTextWorldAgent") In module: `agents.react.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: #### ReActChain[​](#reactchain "Direct link to ReActChain") In module: `agents.react.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: #### SelfAskWithSearchAgent[​](#selfaskwithsearchagent "Direct link to SelfAskWithSearchAgent") In module: 
`agents.self_ask_with_search.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: create\_self\_ask\_with\_search #### SelfAskWithSearchChain[​](#selfaskwithsearchchain "Direct link to SelfAskWithSearchChain") In module: `agents.self_ask_with_search.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: #### StructuredChatAgent[​](#structuredchatagent "Direct link to StructuredChatAgent") In module: `agents.structured_chat.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: create\_structured\_chat\_agent #### RetrievalQA[​](#retrievalqa "Direct link to RetrievalQA") In module: `chains.retrieval_qa.base` Deprecated: 0.1.17 Removal: 0.3.0 Alternative: [create\_retrieval\_chain](https://api.python.langchain.com/en/latest/chains/langchain.chains.retrieval.create_retrieval_chain.html#langchain-chains-retrieval-create-retrieval-chain) #### load\_agent\_from\_config[​](#load_agent_from_config "Direct link to load_agent_from_config") In module: `agents.loading` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: #### load\_agent[​](#load_agent "Direct link to load_agent") In module: `agents.loading` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: #### initialize\_agent[​](#initialize_agent "Direct link to initialize_agent") In module: `agents.initialize` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: Use [LangGraph](/v0.2/docs/how_to/migrate_agent/) or new agent constructor methods like create\_react\_agent, create\_json\_agent, create\_structured\_chat\_agent, etc. #### XMLAgent[​](#xmlagent "Direct link to XMLAgent") In module: `agents.xml.base` Deprecated: 0.1.0 Removal: 0.3.0 Alternative: create\_xml\_agent #### CohereRerank[​](#coherererank "Direct link to CohereRerank") In module: `retrievers.document_compressors.cohere_rerank` Deprecated: 0.0.30 Removal: 0.3.0 Alternative: langchain\_cohere.CohereRerank #### ConversationalRetrievalChain[​](#conversationalretrievalchain "Direct link to ConversationalRetrievalChain") In module: `chains.conversational_retrieval.base` Deprecated: 0.1.17 Removal: 0.3.0 Alternative: [create\_history\_aware\_retriever](https://api.python.langchain.com/en/latest/chains/langchain.chains.history_aware_retriever.create_history_aware_retriever.html) together with [create\_retrieval\_chain](https://api.python.langchain.com/en/latest/chains/langchain.chains.retrieval.create_retrieval_chain.html#langchain-chains-retrieval-create-retrieval-chain) (see example in docstring) #### create\_extraction\_chain\_pydantic[​](#create_extraction_chain_pydantic "Direct link to create_extraction_chain_pydantic") In module: `chains.openai_tools.extraction` Deprecated: 0.1.14 Removal: 0.3.0 Alternative: [with\_structured\_output](/v0.2/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling. #### create\_openai\_fn\_runnable[​](#create_openai_fn_runnable "Direct link to create_openai_fn_runnable") In module: `chains.structured_output.base` Deprecated: 0.1.14 Removal: 0.3.0 Alternative: [with\_structured\_output](/v0.2/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling. #### create\_structured\_output\_runnable[​](#create_structured_output_runnable "Direct link to create_structured_output_runnable") In module: `chains.structured_output.base` Deprecated: 0.1.17 Removal: 0.3.0 Alternative: [with\_structured\_output](/v0.2/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling. 
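Several of the extraction and structured-output entries above (and below) point at the same replacement: the `with_structured_output` method on chat models that support tool calling. A minimal sketch, assuming a tool-calling model such as `ChatOpenAI` (the schema, model name, and input text are illustrative):

```python
from typing import Optional

from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI


class Person(BaseModel):
    """Information about a person mentioned in the text."""

    name: str = Field(description="The person's name")
    age: Optional[int] = Field(default=None, description="The person's age, if stated")


llm = ChatOpenAI(model="gpt-4o-mini")
structured_llm = llm.with_structured_output(Person)

# Replaces create_extraction_chain, create_structured_output_runnable, and similar helpers.
person = structured_llm.invoke("Anna is 29 years old and lives in Berlin.")
print(person)  # e.g. Person(name='Anna', age=29)
```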
#### create\_openai\_fn\_chain[​](#create_openai_fn_chain "Direct link to create_openai_fn_chain") In module: `chains.openai_functions.base` Deprecated: 0.1.1 Removal: 0.3.0 Alternative: create\_openai\_fn\_runnable #### create\_structured\_output\_chain[​](#create_structured_output_chain "Direct link to create_structured_output_chain") In module: `chains.openai_functions.base` Deprecated: 0.1.1 Removal: 0.3.0 Alternative: ChatOpenAI.with\_structured\_output #### create\_extraction\_chain[​](#create_extraction_chain "Direct link to create_extraction_chain") In module: `chains.openai_functions.extraction` Deprecated: 0.1.14 Removal: 0.3.0 Alternative: [with\_structured\_output](/v0.2/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling. #### create\_extraction\_chain\_pydantic[​](#create_extraction_chain_pydantic-1 "Direct link to create_extraction_chain_pydantic") In module: `chains.openai_functions.extraction` Deprecated: 0.1.14 Removal: 0.3.0 Alternative: [with\_structured\_output](/v0.2/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling. [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/versions/v0_2/deprecations.mdx) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous astream\_events v2 ](/v0.2/docs/versions/v0_2/migrating_astream_events/)[ Next Security ](/v0.2/docs/security/) * [Breaking changes](#breaking-changes) * [@tool decorator](#tool-decorator) * [Code that moved to another package](#code-that-moved-to-another-package) * [Code targeted for removal](#code-targeted-for-removal) * [astream events V1](#astream-events-v1) * [langchain\_core](#langchain_core) * [langchain](#langchain)
https://python.langchain.com/v0.2/docs/how_to/document_loader_custom/
How to create a custom Document Loader
======================================

Overview[​](#overview "Direct link to Overview")
------------------------------------------------

Applications based on LLMs frequently entail extracting data from databases or files, like PDFs, and converting it into a format that LLMs can utilize. In LangChain, this usually involves creating Document objects, which encapsulate the extracted text (`page_content`) along with metadata—a dictionary containing details about the document, such as the author's name or the date of publication.

`Document` objects are often formatted into prompts that are fed into an LLM, allowing the LLM to use the information in the `Document` to generate a desired response (e.g., summarizing the document). `Documents` can be either used immediately or indexed into a vectorstore for future retrieval and use.

The main abstractions for Document Loading are:

| Component | Description |
| --- | --- |
| Document | Contains `text` and `metadata` |
| BaseLoader | Use to convert raw data into `Documents` |
| Blob | A representation of binary data that's located either in a file or in memory |
| BaseBlobParser | Logic to parse a `Blob` to yield `Document` objects |

This guide will demonstrate how to write custom document loading and file parsing logic; specifically, we'll see how to:

1. Create a standard document Loader by sub-classing from `BaseLoader`.
2. Create a parser using `BaseBlobParser` and use it in conjunction with `Blob` and `BlobLoaders`. This is useful primarily when working with files.

Standard Document Loader[​](#standard-document-loader "Direct link to Standard Document Loader")
------------------------------------------------------------------------------------------------

A document loader can be implemented by sub-classing from a `BaseLoader` which provides a standard interface for loading documents.

### Interface[​](#interface "Direct link to Interface")

| Method Name | Explanation |
| --- | --- |
| lazy\_load | Used to load documents one by one **lazily**. Use for production code. |
| alazy\_load | Async variant of `lazy_load` |
| load | Used to load all the documents into memory **eagerly**. Use for prototyping or interactive work. |
| aload | Used to load all the documents into memory **eagerly**. Use for prototyping or interactive work. **Added in 2024-04 to LangChain.** |

* The `load` method is a convenience method meant solely for prototyping work -- it just invokes `list(self.lazy_load())`.
* `alazy_load` has a default implementation that will delegate to `lazy_load`. If you're using async, we recommend overriding the default implementation and providing a native async implementation.

::: {.callout-important}
When implementing a document loader do **NOT** provide parameters via the `lazy_load` or `alazy_load` methods. All configuration is expected to be passed through the initializer (`__init__`). This was a design choice made by LangChain to make sure that once a document loader has been instantiated it has all the information needed to load documents.
:::

### Implementation[​](#implementation "Direct link to Implementation")

Let's create an example of a standard document loader that loads a file and creates a document from each line in the file.
from typing import AsyncIterator, Iteratorfrom langchain_core.document_loaders import BaseLoaderfrom langchain_core.documents import Documentclass CustomDocumentLoader(BaseLoader): """An example document loader that reads a file line by line.""" def __init__(self, file_path: str) -> None: """Initialize the loader with a file path. Args: file_path: The path to the file to load. """ self.file_path = file_path def lazy_load(self) -> Iterator[Document]: # <-- Does not take any arguments """A lazy loader that reads a file line by line. When you're implementing lazy load methods, you should use a generator to yield documents one by one. """ with open(self.file_path, encoding="utf-8") as f: line_number = 0 for line in f: yield Document( page_content=line, metadata={"line_number": line_number, "source": self.file_path}, ) line_number += 1 # alazy_load is OPTIONAL. # If you leave out the implementation, a default implementation which delegates to lazy_load will be used! async def alazy_load( self, ) -> AsyncIterator[Document]: # <-- Does not take any arguments """An async lazy loader that reads a file line by line.""" # Requires aiofiles # Install with `pip install aiofiles` # https://github.com/Tinche/aiofiles import aiofiles async with aiofiles.open(self.file_path, encoding="utf-8") as f: line_number = 0 async for line in f: yield Document( page_content=line, metadata={"line_number": line_number, "source": self.file_path}, ) line_number += 1 **API Reference:**[BaseLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_core.document_loaders.base.BaseLoader.html) | [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) ### Test 🧪[​](#test- "Direct link to Test 🧪") To test out the document loader, we need a file with some quality content. with open("./meow.txt", "w", encoding="utf-8") as f: quality_content = "meow meow🐱 \n meow meow🐱 \n meow😻😻" f.write(quality_content)loader = CustomDocumentLoader("./meow.txt") ## Test out the lazy load interfacefor doc in loader.lazy_load(): print() print(type(doc)) print(doc) <class 'langchain_core.documents.base.Document'>page_content='meow meow🐱 \n' metadata={'line_number': 0, 'source': './meow.txt'}<class 'langchain_core.documents.base.Document'>page_content=' meow meow🐱 \n' metadata={'line_number': 1, 'source': './meow.txt'}<class 'langchain_core.documents.base.Document'>page_content=' meow😻😻' metadata={'line_number': 2, 'source': './meow.txt'} ## Test out the async implementationasync for doc in loader.alazy_load(): print() print(type(doc)) print(doc) <class 'langchain_core.documents.base.Document'>page_content='meow meow🐱 \n' metadata={'line_number': 0, 'source': './meow.txt'}<class 'langchain_core.documents.base.Document'>page_content=' meow meow🐱 \n' metadata={'line_number': 1, 'source': './meow.txt'}<class 'langchain_core.documents.base.Document'>page_content=' meow😻😻' metadata={'line_number': 2, 'source': './meow.txt'} ::: {.callout-tip} `load()` can be helpful in an interactive environment such as a jupyter notebook. Avoid using it for production code since eager loading assumes that all the content can fit into memory, which is not always the case, especially for enterprise data. 
:::

    loader.load()
    [Document(page_content='meow meow🐱 \n', metadata={'line_number': 0, 'source': './meow.txt'}),
     Document(page_content=' meow meow🐱 \n', metadata={'line_number': 1, 'source': './meow.txt'}),
     Document(page_content=' meow😻😻', metadata={'line_number': 2, 'source': './meow.txt'})]

Working with Files[​](#working-with-files "Direct link to Working with Files")
------------------------------------------------------------------------------

Many document loaders involve parsing files. The difference between such loaders usually stems from how the file is parsed rather than how the file is loaded. For example, you can use `open` to read the binary content of either a PDF or a markdown file, but you need different parsing logic to convert that binary data into text.

As a result, it can be helpful to decouple the parsing logic from the loading logic, which makes it easier to re-use a given parser regardless of how the data was loaded.

### BaseBlobParser[​](#baseblobparser "Direct link to BaseBlobParser")

A `BaseBlobParser` is an interface that accepts a `blob` and outputs a list of `Document` objects. A `blob` is a representation of data that lives either in memory or in a file. LangChain Python has a `Blob` primitive which is inspired by the [Blob WebAPI spec](https://developer.mozilla.org/en-US/docs/Web/API/Blob).

    from langchain_core.document_loaders import BaseBlobParser, Blob


    class MyParser(BaseBlobParser):
        """A simple parser that creates a document from each line."""

        def lazy_parse(self, blob: Blob) -> Iterator[Document]:
            """Parse a blob into a document line by line."""
            line_number = 0
            with blob.as_bytes_io() as f:
                for line in f:
                    line_number += 1
                    yield Document(
                        page_content=line,
                        metadata={"line_number": line_number, "source": blob.source},
                    )

**API Reference:**[BaseBlobParser](https://api.python.langchain.com/en/latest/document_loaders/langchain_core.document_loaders.base.BaseBlobParser.html) | [Blob](https://api.python.langchain.com/en/latest/document_loaders/langchain_core.document_loaders.blob_loaders.Blob.html)

    blob = Blob.from_path("./meow.txt")
    parser = MyParser()

    list(parser.lazy_parse(blob))
    [Document(page_content='meow meow🐱 \n', metadata={'line_number': 1, 'source': './meow.txt'}),
     Document(page_content=' meow meow🐱 \n', metadata={'line_number': 2, 'source': './meow.txt'}),
     Document(page_content=' meow😻😻', metadata={'line_number': 3, 'source': './meow.txt'})]

Using the **blob** API also allows one to load content directly from memory without having to read it from a file!

    blob = Blob(data=b"some data from memory\nmeow")
    list(parser.lazy_parse(blob))
    [Document(page_content='some data from memory\n', metadata={'line_number': 1, 'source': None}),
     Document(page_content='meow', metadata={'line_number': 2, 'source': None})]

### Blob[​](#blob "Direct link to Blob")

Let's take a quick look through some of the Blob API.

    blob = Blob.from_path("./meow.txt", metadata={"foo": "bar"})

    blob.encoding
    'utf-8'

    blob.as_bytes()
    b'meow meow\xf0\x9f\x90\xb1 \n meow meow\xf0\x9f\x90\xb1 \n meow\xf0\x9f\x98\xbb\xf0\x9f\x98\xbb'

    blob.as_string()
    'meow meow🐱 \n meow meow🐱 \n meow😻😻'

    blob.as_bytes_io()
    <contextlib._GeneratorContextManager at 0x743f34324450>

    blob.metadata
    {'foo': 'bar'}

    blob.source
    './meow.txt'

### Blob Loaders[​](#blob-loaders "Direct link to Blob Loaders")

While a parser encapsulates the logic needed to parse binary data into documents, _blob loaders_ encapsulate the logic that's necessary to load blobs from a given storage location.
At the moment, `LangChain` only supports `FileSystemBlobLoader`. You can use the `FileSystemBlobLoader` to load blobs and then use the parser to parse them.

    from langchain_community.document_loaders.blob_loaders import FileSystemBlobLoader

    blob_loader = FileSystemBlobLoader(path=".", glob="*.mdx", show_progress=True)

**API Reference:**[FileSystemBlobLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.blob_loaders.file_system.FileSystemBlobLoader.html)

    parser = MyParser()
    for blob in blob_loader.yield_blobs():
        for doc in parser.lazy_parse(blob):
            print(doc)
            break

    0%| | 0/8 [00:00<?, ?it/s]

    page_content='# Microsoft Office\n' metadata={'line_number': 1, 'source': 'office_file.mdx'}
    page_content='# Markdown\n' metadata={'line_number': 1, 'source': 'markdown.mdx'}
    page_content='# JSON\n' metadata={'line_number': 1, 'source': 'json.mdx'}
    page_content='---\n' metadata={'line_number': 1, 'source': 'pdf.mdx'}
    page_content='---\n' metadata={'line_number': 1, 'source': 'index.mdx'}
    page_content='# File Directory\n' metadata={'line_number': 1, 'source': 'file_directory.mdx'}
    page_content='# CSV\n' metadata={'line_number': 1, 'source': 'csv.mdx'}
    page_content='# HTML\n' metadata={'line_number': 1, 'source': 'html.mdx'}

### Generic Loader[​](#generic-loader "Direct link to Generic Loader")

LangChain has a `GenericLoader` abstraction which composes a `BlobLoader` with a `BaseBlobParser`. `GenericLoader` is meant to provide standardized classmethods that make it easy to use existing `BlobLoader` implementations. At the moment, only the `FileSystemBlobLoader` is supported.

    from langchain_community.document_loaders.generic import GenericLoader

    loader = GenericLoader.from_filesystem(
        path=".", glob="*.mdx", show_progress=True, parser=MyParser()
    )

    for idx, doc in enumerate(loader.lazy_load()):
        if idx < 5:
            print(doc)
    print("... output truncated for demo purposes")

**API Reference:**[GenericLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.generic.GenericLoader.html)

    0%| | 0/8 [00:00<?, ?it/s]

    page_content='# Microsoft Office\n' metadata={'line_number': 1, 'source': 'office_file.mdx'}
    page_content='\n' metadata={'line_number': 2, 'source': 'office_file.mdx'}
    page_content='>[The Microsoft Office](https://www.office.com/) suite of productivity software includes Microsoft Word, Microsoft Excel, Microsoft PowerPoint, Microsoft Outlook, and Microsoft OneNote. It is available for Microsoft Windows and macOS operating systems. It is also available on Android and iOS.\n' metadata={'line_number': 3, 'source': 'office_file.mdx'}
    page_content='\n' metadata={'line_number': 4, 'source': 'office_file.mdx'}
    page_content='This covers how to load commonly used file formats including `DOCX`, `XLSX` and `PPTX` documents into a document format that we can use downstream.\n' metadata={'line_number': 5, 'source': 'office_file.mdx'}
    ... output truncated for demo purposes

#### Custom Generic Loader[​](#custom-generic-loader "Direct link to Custom Generic Loader")

If you really like creating classes, you can sub-class and create a class to encapsulate the logic together. You can sub-class from this class to load content using an existing loader.
from typing import Anyclass MyCustomLoader(GenericLoader): @staticmethod def get_parser(**kwargs: Any) -> BaseBlobParser: """Override this method to associate a default parser with the class.""" return MyParser() loader = MyCustomLoader.from_filesystem(path=".", glob="*.mdx", show_progress=True)for idx, doc in enumerate(loader.lazy_load()): if idx < 5: print(doc)print("... output truncated for demo purposes") 0%| | 0/8 [00:00<?, ?it/s] page_content='# Microsoft Office\n' metadata={'line_number': 1, 'source': 'office_file.mdx'}page_content='\n' metadata={'line_number': 2, 'source': 'office_file.mdx'}page_content='>[The Microsoft Office](https://www.office.com/) suite of productivity software includes Microsoft Word, Microsoft Excel, Microsoft PowerPoint, Microsoft Outlook, and Microsoft OneNote. It is available for Microsoft Windows and macOS operating systems. It is also available on Android and iOS.\n' metadata={'line_number': 3, 'source': 'office_file.mdx'}page_content='\n' metadata={'line_number': 4, 'source': 'office_file.mdx'}page_content='This covers how to load commonly used file formats including `DOCX`, `XLSX` and `PPTX` documents into a document format that we can use downstream.\n' metadata={'line_number': 5, 'source': 'office_file.mdx'}... output truncated for demo purposes [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/document_loader_custom.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How deal with high cardinality categoricals when doing query analysis ](/v0.2/docs/how_to/query_high_cardinality/)[ Next How to split by HTML header ](/v0.2/docs/how_to/HTML_header_metadata_splitter/) * [Overview](#overview) * [Standard Document Loader](#standard-document-loader) * [Interface](#interface) * [Implementation](#implementation) * [Test 🧪](#test-) * [Working with Files](#working-with-files) * [BaseBlobParser](#baseblobparser) * [Blob](#blob) * [Blob Loaders](#blob-loaders) * [Generic Loader](#generic-loader)
https://python.langchain.com/v0.2/docs/how_to/contextual_compression/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to do retrieval with contextual compression On this page How to do retrieval with contextual compression =============================================== One challenge with retrieval is that usually you don't know the specific queries your document storage system will face when you ingest data into the system. This means that the information most relevant to a query may be buried in a document with a lot of irrelevant text. Passing that full document through your application can lead to more expensive LLM calls and poorer responses. Contextual compression is meant to fix this. The idea is simple: instead of immediately returning retrieved documents as-is, you can compress them using the context of the given query, so that only the relevant information is returned. “Compressing” here refers to both compressing the contents of an individual document and filtering out documents wholesale. To use the Contextual Compression Retriever, you'll need: * a base retriever * a Document Compressor The Contextual Compression Retriever passes queries to the base retriever, takes the initial documents and passes them through the Document Compressor. The Document Compressor takes a list of documents and shortens it by reducing the contents of documents or dropping documents altogether. Get started[​](#get-started "Direct link to Get started") --------------------------------------------------------- # Helper function for printing docsdef pretty_print_docs(docs): print( f"\n{'-' * 100}\n".join( [f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)] ) ) Using a vanilla vector store retriever[​](#using-a-vanilla-vector-store-retriever "Direct link to Using a vanilla vector store retriever") ------------------------------------------------------------------------------------------------------------------------------------------ Let's start by initializing a simple vector store retriever and storing the 2023 State of the Union speech (in chunks). We can see that given an example question our retriever returns one or two relevant docs and a few irrelevant docs. And even the relevant docs have a lot of irrelevant information in them. from langchain_community.document_loaders import TextLoaderfrom langchain_community.vectorstores import FAISSfrom langchain_openai import OpenAIEmbeddingsfrom langchain_text_splitters import CharacterTextSplitterdocuments = TextLoader("state_of_the_union.txt").load()text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)texts = text_splitter.split_documents(documents)retriever = FAISS.from_documents(texts, OpenAIEmbeddings()).as_retriever()docs = retriever.invoke("What did the president say about Ketanji Brown Jackson")pretty_print_docs(docs) **API Reference:**[TextLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.text.TextLoader.html) | [FAISS](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) | [CharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.CharacterTextSplitter.html) Document 1:Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. 
Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.----------------------------------------------------------------------------------------------------Document 2:A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. We’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.----------------------------------------------------------------------------------------------------Document 3:And for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. And soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. So tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. First, beat the opioid epidemic.----------------------------------------------------------------------------------------------------Document 4:Tonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers. And as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up. That ends on my watch. Medicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect. We’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees. Let’s pass the Paycheck Fairness Act and paid leave. Raise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty. 
Let’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges. Adding contextual compression with an `LLMChainExtractor`[​](#adding-contextual-compression-with-an-llmchainextractor "Direct link to adding-contextual-compression-with-an-llmchainextractor") ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Now let's wrap our base retriever with a `ContextualCompressionRetriever`. We'll add an `LLMChainExtractor`, which will iterate over the initially returned documents and extract from each only the content that is relevant to the query. from langchain.retrievers import ContextualCompressionRetrieverfrom langchain.retrievers.document_compressors import LLMChainExtractorfrom langchain_openai import OpenAIllm = OpenAI(temperature=0)compressor = LLMChainExtractor.from_llm(llm)compression_retriever = ContextualCompressionRetriever( base_compressor=compressor, base_retriever=retriever)compressed_docs = compression_retriever.invoke( "What did the president say about Ketanji Jackson Brown")pretty_print_docs(compressed_docs) **API Reference:**[ContextualCompressionRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.contextual_compression.ContextualCompressionRetriever.html) | [LLMChainExtractor](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.chain_extract.LLMChainExtractor.html) | [OpenAI](https://api.python.langchain.com/en/latest/llms/langchain_openai.llms.base.OpenAI.html) Document 1:I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. More built-in compressors: filters[​](#more-built-in-compressors-filters "Direct link to More built-in compressors: filters") ----------------------------------------------------------------------------------------------------------------------------- ### `LLMChainFilter`[​](#llmchainfilter "Direct link to llmchainfilter") The `LLMChainFilter` is slightly simpler but more robust compressor that uses an LLM chain to decide which of the initially retrieved documents to filter out and which ones to return, without manipulating the document contents. from langchain.retrievers.document_compressors import LLMChainFilter_filter = LLMChainFilter.from_llm(llm)compression_retriever = ContextualCompressionRetriever( base_compressor=_filter, base_retriever=retriever)compressed_docs = compression_retriever.invoke( "What did the president say about Ketanji Jackson Brown")pretty_print_docs(compressed_docs) **API Reference:**[LLMChainFilter](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.chain_filter.LLMChainFilter.html) Document 1:Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. 
And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. ### `EmbeddingsFilter`[​](#embeddingsfilter "Direct link to embeddingsfilter") Making an extra LLM call over each retrieved document is expensive and slow. The `EmbeddingsFilter` provides a cheaper and faster option by embedding the documents and query and only returning those documents which have sufficiently similar embeddings to the query. from langchain.retrievers.document_compressors import EmbeddingsFilterfrom langchain_openai import OpenAIEmbeddingsembeddings = OpenAIEmbeddings()embeddings_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)compression_retriever = ContextualCompressionRetriever( base_compressor=embeddings_filter, base_retriever=retriever)compressed_docs = compression_retriever.invoke( "What did the president say about Ketanji Jackson Brown")pretty_print_docs(compressed_docs) **API Reference:**[EmbeddingsFilter](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.embeddings_filter.EmbeddingsFilter.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) Document 1:Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.----------------------------------------------------------------------------------------------------Document 2:A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. We’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders. 
Stringing compressors and document transformers together[​](#stringing-compressors-and-document-transformers-together "Direct link to Stringing compressors and document transformers together") ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Using the `DocumentCompressorPipeline` we can also easily combine multiple compressors in sequence. Along with compressors we can add `BaseDocumentTransformer`s to our pipeline, which don't perform any contextual compression but simply perform some transformation on a set of documents. For example `TextSplitter`s can be used as document transformers to split documents into smaller pieces, and the `EmbeddingsRedundantFilter` can be used to filter out redundant documents based on embedding similarity between documents. Below we create a compressor pipeline by first splitting our docs into smaller chunks, then removing redundant documents, and then filtering based on relevance to the query. from langchain.retrievers.document_compressors import DocumentCompressorPipelinefrom langchain_community.document_transformers import EmbeddingsRedundantFilterfrom langchain_text_splitters import CharacterTextSplittersplitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0, separator=". ")redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings)relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)pipeline_compressor = DocumentCompressorPipeline( transformers=[splitter, redundant_filter, relevant_filter]) **API Reference:**[DocumentCompressorPipeline](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.base.DocumentCompressorPipeline.html) | [EmbeddingsRedundantFilter](https://api.python.langchain.com/en/latest/document_transformers/langchain_community.document_transformers.embeddings_redundant_filter.EmbeddingsRedundantFilter.html) | [CharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.CharacterTextSplitter.html) compression_retriever = ContextualCompressionRetriever( base_compressor=pipeline_compressor, base_retriever=retriever)compressed_docs = compression_retriever.invoke( "What did the president say about Ketanji Jackson Brown")pretty_print_docs(compressed_docs) Document 1:One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson----------------------------------------------------------------------------------------------------Document 2:As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year----------------------------------------------------------------------------------------------------Document 3:A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. 
A consensus builder----------------------------------------------------------------------------------------------------Document 4:Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. We can do both [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/contextual_compression.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to split code ](/v0.2/docs/how_to/code_splitter/)[ Next How to create custom callback handlers ](/v0.2/docs/how_to/custom_callbacks/) * [Get started](#get-started) * [Using a vanilla vector store retriever](#using-a-vanilla-vector-store-retriever) * [Adding contextual compression with an `LLMChainExtractor`](#adding-contextual-compression-with-an-llmchainextractor) * [More built-in compressors: filters](#more-built-in-compressors-filters) * [`LLMChainFilter`](#llmchainfilter) * [`EmbeddingsFilter`](#embeddingsfilter) * [Stringing compressors and document transformers together](#stringing-compressors-and-document-transformers-together)
https://python.langchain.com/v0.2/docs/security/
* [](/v0.2/) * Security On this page Security ======== LangChain has a large ecosystem of integrations with various external resources like local and remote file systems, APIs and databases. These integrations allow developers to create versatile applications that combine the power of LLMs with the ability to access, interact with and manipulate external resources. Best practices[​](#best-practices "Direct link to Best practices") ------------------------------------------------------------------ When building such applications developers should remember to follow good security practices: * [**Limit Permissions**](https://en.wikipedia.org/wiki/Principle_of_least_privilege): Scope permissions specifically to the application's need. Granting broad or excessive permissions can introduce significant security vulnerabilities. To avoid such vulnerabilities, consider using read-only credentials, disallowing access to sensitive resources, using sandboxing techniques (such as running inside a container), etc. as appropriate for your application. * **Anticipate Potential Misuse**: Just as humans can err, so can Large Language Models (LLMs). Always assume that any system access or credentials may be used in any way allowed by the permissions they are assigned. For example, if a pair of database credentials allows deleting data, it’s safest to assume that any LLM able to use those credentials may in fact delete data. * [**Defense in Depth**](https://en.wikipedia.org/wiki/Defense_in_depth_\(computing\)): No security technique is perfect. Fine-tuning and good chain design can reduce, but not eliminate, the odds that a Large Language Model (LLM) may make a mistake. It’s best to combine multiple layered security approaches rather than relying on any single layer of defense to ensure security. For example: use both read-only permissions and sandboxing to ensure that LLMs are only able to access data that is explicitly meant for them to use. Risks of not doing so include, but are not limited to: * Data corruption or loss. * Unauthorized access to confidential information. * Compromised performance or availability of critical resources. Example scenarios with mitigation strategies: * A user may ask an agent with access to the file system to delete files that should not be deleted or read the content of files that contain sensitive information. To mitigate, limit the agent to only use a specific directory and only allow it to read or write files that are safe to read or write. Consider further sandboxing the agent by running it in a container. * A user may ask an agent with write access to an external API to write malicious data to the API, or delete data from that API. To mitigate, give the agent read-only API keys, or limit it to only use endpoints that are already resistant to such misuse. * A user may ask an agent with access to a database to drop a table or mutate the schema. To mitigate, scope the credentials to only the tables that the agent needs to access and consider issuing READ-ONLY credentials. If you're building applications that access external resources like file systems, APIs or databases, consider speaking with your company's security team to determine how to best design and secure your applications. 
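As a concrete illustration of the least-privilege and sandboxing advice above, a file-system toolkit can be scoped to a throwaway directory and a read-only subset of tools before it is handed to an agent. This is only a sketch using `FileManagementToolkit` from `langchain_community`; adapt the directory and tool selection to your application.

```python
from tempfile import TemporaryDirectory

from langchain_community.agent_toolkits import FileManagementToolkit

# A temporary sandbox directory keeps the agent away from the rest of the file system.
working_directory = TemporaryDirectory()

# Expose only read-oriented tools; write_file, file_delete, etc. are deliberately omitted.
toolkit = FileManagementToolkit(
    root_dir=str(working_directory.name),
    selected_tools=["read_file", "list_directory"],
)
tools = toolkit.get_tools()
print([t.name for t in tools])  # e.g. ['read_file', 'list_directory']
```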
Reporting a vulnerability[​](#reporting-a-vulnerability "Direct link to Reporting a vulnerability")
---------------------------------------------------------------------------------------------------

Please report security vulnerabilities by email to [[email protected]](mailto:[email protected]). This will ensure the issue is promptly triaged and acted upon as needed.
https://python.langchain.com/v0.2/docs/how_to/HTML_header_metadata_splitter/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to split by HTML header On this page How to split by HTML header =========================== Description and motivation[​](#description-and-motivation "Direct link to Description and motivation") ------------------------------------------------------------------------------------------------------ [HTMLHeaderTextSplitter](https://api.python.langchain.com/en/latest/html/langchain_text_splitters.html.HTMLHeaderTextSplitter.html) is a "structure-aware" chunker that splits text at the HTML element level and adds metadata for each header "relevant" to any given chunk. It can return chunks element by element or combine elements with the same metadata, with the objectives of (a) keeping related text grouped (more or less) semantically and (b) preserving context-rich information encoded in document structures. It can be used with other text splitters as part of a chunking pipeline. It is analogous to the [MarkdownHeaderTextSplitter](/v0.2/docs/how_to/markdown_header_metadata_splitter/) for markdown files. To specify what headers to split on, specify `headers_to_split_on` when instantiating `HTMLHeaderTextSplitter` as shown below. Usage examples[​](#usage-examples "Direct link to Usage examples") ------------------------------------------------------------------ ### 1) How to split HTML strings:[​](#1-how-to-split-html-strings "Direct link to 1) How to split HTML strings:") %pip install -qU langchain-text-splitters from langchain_text_splitters import HTMLHeaderTextSplitterhtml_string = """<!DOCTYPE html><html><body> <div> <h1>Foo</h1> <p>Some intro text about Foo.</p> <div> <h2>Bar main section</h2> <p>Some intro text about Bar.</p> <h3>Bar subsection 1</h3> <p>Some text about the first subtopic of Bar.</p> <h3>Bar subsection 2</h3> <p>Some text about the second subtopic of Bar.</p> </div> <div> <h2>Baz</h2> <p>Some text about Baz</p> </div> <br> <p>Some concluding text about Foo</p> </div></body></html>"""headers_to_split_on = [ ("h1", "Header 1"), ("h2", "Header 2"), ("h3", "Header 3"),]html_splitter = HTMLHeaderTextSplitter(headers_to_split_on)html_header_splits = html_splitter.split_text(html_string)html_header_splits **API Reference:**[HTMLHeaderTextSplitter](https://api.python.langchain.com/en/latest/html/langchain_text_splitters.html.HTMLHeaderTextSplitter.html) [Document(page_content='Foo'), Document(page_content='Some intro text about Foo. 
\nBar main section Bar subsection 1 Bar subsection 2', metadata={'Header 1': 'Foo'}), Document(page_content='Some intro text about Bar.', metadata={'Header 1': 'Foo', 'Header 2': 'Bar main section'}), Document(page_content='Some text about the first subtopic of Bar.', metadata={'Header 1': 'Foo', 'Header 2': 'Bar main section', 'Header 3': 'Bar subsection 1'}), Document(page_content='Some text about the second subtopic of Bar.', metadata={'Header 1': 'Foo', 'Header 2': 'Bar main section', 'Header 3': 'Bar subsection 2'}), Document(page_content='Baz', metadata={'Header 1': 'Foo'}), Document(page_content='Some text about Baz', metadata={'Header 1': 'Foo', 'Header 2': 'Baz'}), Document(page_content='Some concluding text about Foo', metadata={'Header 1': 'Foo'})] To return each element together with their associated headers, specify `return_each_element=True` when instantiating `HTMLHeaderTextSplitter`: html_splitter = HTMLHeaderTextSplitter( headers_to_split_on, return_each_element=True,)html_header_splits_elements = html_splitter.split_text(html_string) Comparing with the above, where elements are aggregated by their headers: for element in html_header_splits[:2]: print(element) page_content='Foo'page_content='Some intro text about Foo. \nBar main section Bar subsection 1 Bar subsection 2' metadata={'Header 1': 'Foo'} Now each element is returned as a distinct `Document`: for element in html_header_splits_elements[:3]: print(element) page_content='Foo'page_content='Some intro text about Foo.' metadata={'Header 1': 'Foo'}page_content='Bar main section Bar subsection 1 Bar subsection 2' metadata={'Header 1': 'Foo'} #### 2) How to split from a URL or HTML file:[​](#2-how-to-split-from-a-url-or-html-file "Direct link to 2) How to split from a URL or HTML file:") To read directly from a URL, pass the URL string into the `split_text_from_url` method. Similarly, a local HTML file can be passed to the `split_text_from_file` method. url = "https://plato.stanford.edu/entries/goedel/"headers_to_split_on = [ ("h1", "Header 1"), ("h2", "Header 2"), ("h3", "Header 3"), ("h4", "Header 4"),]html_splitter = HTMLHeaderTextSplitter(headers_to_split_on)# for local file use html_splitter.split_text_from_file(<path_to_file>)html_header_splits = html_splitter.split_text_from_url(url) ### 2) How to constrain chunk sizes:[​](#2-how-to-constrain-chunk-sizes "Direct link to 2) How to constrain chunk sizes:") `HTMLHeaderTextSplitter`, which splits based on HTML headers, can be composed with another splitter which constrains splits based on character lengths, such as `RecursiveCharacterTextSplitter`. This can be done using the `.split_documents` method of the second splitter: from langchain_text_splitters import RecursiveCharacterTextSplitterchunk_size = 500chunk_overlap = 30text_splitter = RecursiveCharacterTextSplitter( chunk_size=chunk_size, chunk_overlap=chunk_overlap)# Splitsplits = text_splitter.split_documents(html_header_splits)splits[80:85] **API Reference:**[RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html) [Document(page_content='We see that Gödel first tried to reduce the consistency problem for analysis to that of arithmetic. This seemed to require a truth definition for arithmetic, which in turn led to paradoxes, such as the Liar paradox (“This sentence is false”) and Berry’s paradox (“The least number not defined by an expression consisting of just fourteen English words”). 
Gödel then noticed that such paradoxes would not necessarily arise if truth were replaced by provability. But this means that arithmetic truth', metadata={'Header 1': 'Kurt Gödel', 'Header 2': '2. Gödel’s Mathematical Work', 'Header 3': '2.2 The Incompleteness Theorems', 'Header 4': '2.2.1 The First Incompleteness Theorem'}), Document(page_content='means that arithmetic truth and arithmetic provability are not co-extensive — whence the First Incompleteness Theorem.', metadata={'Header 1': 'Kurt Gödel', 'Header 2': '2. Gödel’s Mathematical Work', 'Header 3': '2.2 The Incompleteness Theorems', 'Header 4': '2.2.1 The First Incompleteness Theorem'}), Document(page_content='This account of Gödel’s discovery was told to Hao Wang very much after the fact; but in Gödel’s contemporary correspondence with Bernays and Zermelo, essentially the same description of his path to the theorems is given. (See Gödel 2003a and Gödel 2003b respectively.) From those accounts we see that the undefinability of truth in arithmetic, a result credited to Tarski, was likely obtained in some form by Gödel by 1931. But he neither publicized nor published the result; the biases logicians', metadata={'Header 1': 'Kurt Gödel', 'Header 2': '2. Gödel’s Mathematical Work', 'Header 3': '2.2 The Incompleteness Theorems', 'Header 4': '2.2.1 The First Incompleteness Theorem'}), Document(page_content='result; the biases logicians had expressed at the time concerning the notion of truth, biases which came vehemently to the fore when Tarski announced his results on the undefinability of truth in formal systems 1935, may have served as a deterrent to Gödel’s publication of that theorem.', metadata={'Header 1': 'Kurt Gödel', 'Header 2': '2. Gödel’s Mathematical Work', 'Header 3': '2.2 The Incompleteness Theorems', 'Header 4': '2.2.1 The First Incompleteness Theorem'}), Document(page_content='We now describe the proof of the two theorems, formulating Gödel’s results in Peano arithmetic. Gödel himself used a system related to that defined in Principia Mathematica, but containing Peano arithmetic. In our presentation of the First and Second Incompleteness Theorems we refer to Peano arithmetic as P, following Gödel’s notation.', metadata={'Header 1': 'Kurt Gödel', 'Header 2': '2. Gödel’s Mathematical Work', 'Header 3': '2.2 The Incompleteness Theorems', 'Header 4': '2.2.2 The proof of the First Incompleteness Theorem'})] Limitations[​](#limitations "Direct link to Limitations") --------------------------------------------------------- There can be quite a bit of structural variation from one HTML document to another, and while `HTMLHeaderTextSplitter` will attempt to attach all "relevant" headers to any given chunk, it can sometimes miss certain headers. For example, the algorithm assumes an informational hierarchy in which headers are always at nodes "above" associated text, i.e. prior siblings, ancestors, and combinations thereof. 
In the following news article (as of the writing of this document), the document is structured such that the text of the top-level headline, while tagged "h1", is in a _distinct_ subtree from the text elements that we'd expect it to be _"above"_—so we can observe that the "h1" element and its associated text do not show up in the chunk metadata (but, where applicable, we do see "h2" and its associated text): url = "https://www.cnn.com/2023/09/25/weather/el-nino-winter-us-climate/index.html"headers_to_split_on = [ ("h1", "Header 1"), ("h2", "Header 2"),]html_splitter = HTMLHeaderTextSplitter(headers_to_split_on)html_header_splits = html_splitter.split_text_from_url(url)print(html_header_splits[1].page_content[:500]) No two El Niño winters are the same, but many have temperature and precipitation trends in common. Average conditions during an El Niño winter across the continental US. One of the major reasons is the position of the jet stream, which often shifts south during an El Niño winter. This shift typically brings wetter and cooler weather to the South while the North becomes drier and warmer, according to NOAA. Because the jet stream is essentially a river of air that storms flow through, they c
https://python.langchain.com/v0.2/docs/how_to/HTML_section_aware_splitter/
How to split by HTML sections ============================= Description and motivation[​](#description-and-motivation "Direct link to Description and motivation") ------------------------------------------------------------------------------------------------------ Similar in concept to the [HTMLHeaderTextSplitter](/v0.2/docs/how_to/HTML_header_metadata_splitter/), the `HTMLSectionSplitter` is a "structure-aware" chunker that splits text at the element level and adds metadata for each header "relevant" to any given chunk. It can return chunks element by element or combine elements with the same metadata, with the objectives of (a) keeping related text grouped (more or less) semantically and (b) preserving context-rich information encoded in document structures. Use `xslt_path` to provide an absolute path to an XSLT file that transforms the HTML so that sections can be detected from the provided tags. By default, the `converting_to_header.xslt` file in the `data_connection/document_transformers` directory is used; it converts the HTML into a format/layout in which sections are easier to detect. For example, `span` elements can be converted to header tags based on their font size and then detected as sections. Usage examples[​](#usage-examples "Direct link to Usage examples") ------------------------------------------------------------------ ### 1) How to split HTML strings:[​](#1-how-to-split-html-strings "Direct link to 1) How to split HTML strings:") from langchain_text_splitters import HTMLSectionSplitterhtml_string = """ <!DOCTYPE html> <html> <body> <div> <h1>Foo</h1> <p>Some intro text about Foo.</p> <div> <h2>Bar main section</h2> <p>Some intro text about Bar.</p> <h3>Bar subsection 1</h3> <p>Some text about the first subtopic of Bar.</p> <h3>Bar subsection 2</h3> <p>Some text about the second subtopic of Bar.</p> </div> <div> <h2>Baz</h2> <p>Some text about Baz</p> </div> <br> <p>Some concluding text about Foo</p> </div> </body> </html>"""headers_to_split_on = [("h1", "Header 1"), ("h2", "Header 2")]html_splitter = HTMLSectionSplitter(headers_to_split_on)html_header_splits = html_splitter.split_text(html_string)html_header_splits **API Reference:**[HTMLSectionSplitter](https://api.python.langchain.com/en/latest/html/langchain_text_splitters.html.HTMLSectionSplitter.html) [Document(page_content='Foo \n Some intro text about Foo.', metadata={'Header 1': 'Foo'}), Document(page_content='Bar main section \n Some intro text about Bar. \n Bar subsection 1 \n Some text about the first subtopic of Bar. \n Bar subsection 2 \n Some text about the second subtopic of Bar.', metadata={'Header 2': 'Bar main section'}), Document(page_content='Baz \n Some text about Baz \n \n \n Some concluding text about Foo', metadata={'Header 2': 'Baz'})] ### 2) How to constrain chunk sizes:[​](#2-how-to-constrain-chunk-sizes "Direct link to 2) How to constrain chunk sizes:") `HTMLSectionSplitter` can be used with other text splitters as part of a chunking pipeline. Internally, it uses the `RecursiveCharacterTextSplitter` when a section is larger than the chunk size. It also compares the font size of the text against a determined font-size threshold to decide whether the text should be treated as a section heading. 
from langchain_text_splitters import RecursiveCharacterTextSplitterhtml_string = """ <!DOCTYPE html> <html> <body> <div> <h1>Foo</h1> <p>Some intro text about Foo.</p> <div> <h2>Bar main section</h2> <p>Some intro text about Bar.</p> <h3>Bar subsection 1</h3> <p>Some text about the first subtopic of Bar.</p> <h3>Bar subsection 2</h3> <p>Some text about the second subtopic of Bar.</p> </div> <div> <h2>Baz</h2> <p>Some text about Baz</p> </div> <br> <p>Some concluding text about Foo</p> </div> </body> </html>"""headers_to_split_on = [ ("h1", "Header 1"), ("h2", "Header 2"), ("h3", "Header 3"), ("h4", "Header 4"),]html_splitter = HTMLSectionSplitter(headers_to_split_on)html_header_splits = html_splitter.split_text(html_string)chunk_size = 500chunk_overlap = 30text_splitter = RecursiveCharacterTextSplitter( chunk_size=chunk_size, chunk_overlap=chunk_overlap)# Splitsplits = text_splitter.split_documents(html_header_splits)splits **API Reference:**[RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html) [Document(page_content='Foo \n Some intro text about Foo.', metadata={'Header 1': 'Foo'}), Document(page_content='Bar main section \n Some intro text about Bar.', metadata={'Header 2': 'Bar main section'}), Document(page_content='Bar subsection 1 \n Some text about the first subtopic of Bar.', metadata={'Header 3': 'Bar subsection 1'}), Document(page_content='Bar subsection 2 \n Some text about the second subtopic of Bar.', metadata={'Header 3': 'Bar subsection 2'}), Document(page_content='Baz \n Some text about Baz \n \n \n Some concluding text about Foo', metadata={'Header 2': 'Baz'})] [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/HTML_section_aware_splitter.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to split by HTML header ](/v0.2/docs/how_to/HTML_header_metadata_splitter/)[ Next How to use the MultiQueryRetriever ](/v0.2/docs/how_to/MultiQueryRetriever/) * [Description and motivation](#description-and-motivation) * [Usage examples](#usage-examples) * [1) How to split HTML strings:](#1-how-to-split-html-strings) * [2) How to constrain chunk sizes:](#2-how-to-constrain-chunk-sizes)
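The `xslt_path` option described above is not exercised in these examples. The following is a minimal sketch of how it could be supplied, assuming the keyword argument matches the description; the stylesheet path is hypothetical and should point at your own XSLT file.

```python
from langchain_text_splitters import HTMLSectionSplitter

headers_to_split_on = [("h1", "Header 1"), ("h2", "Header 2")]

# Point xslt_path at a custom stylesheet that pre-transforms the HTML (for
# example, promoting large-font <span> elements to header tags) before section
# detection runs. Omit the argument to fall back to the bundled default.
html_splitter = HTMLSectionSplitter(
    headers_to_split_on,
    xslt_path="/absolute/path/to/custom_sections.xslt",  # hypothetical path
)

# html_string can be any HTML document, e.g. the sample defined earlier on this page.
# sections = html_splitter.split_text(html_string)
```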
https://python.langchain.com/v0.2/docs/how_to/custom_chat_model/
How to create a custom chat model class ======================================= Prerequisites This guide assumes familiarity with the following concepts: * [Chat models](/v0.2/docs/concepts/#chat-models) In this guide, we'll learn how to create a custom chat model using LangChain abstractions. Wrapping your LLM with the standard [`BaseChatModel`](https://api.python.langchain.com/en/latest/language_models/langchain_core.language_models.chat_models.BaseChatModel.html) interface allows you to use your LLM in existing LangChain programs with minimal code modifications! As a bonus, your LLM will automatically become a LangChain `Runnable` and will benefit from some optimizations out of the box (e.g., batch via a threadpool), async support, the `astream_events` API, etc. Inputs and outputs[​](#inputs-and-outputs "Direct link to Inputs and outputs") ------------------------------------------------------------------------------ First, we need to talk about **messages**, which are the inputs and outputs of chat models. ### Messages[​](#messages "Direct link to Messages") Chat models take messages as inputs and return a message as output. LangChain has a few [built-in message types](/v0.2/docs/concepts/#message-types): Message Type Description `SystemMessage` Used for priming AI behavior, usually passed in as the first of a sequence of input messages. `HumanMessage` Represents a message from a person interacting with the chat model. `AIMessage` Represents a message from the chat model. This can be either text or a request to invoke a tool. `FunctionMessage` / `ToolMessage` Message for passing the results of tool invocation back to the model. `AIMessageChunk` / `HumanMessageChunk` / ... Chunk variant of each type of message. note `ToolMessage` and `FunctionMessage` closely follow OpenAI's `function` and `tool` roles. This is a rapidly developing field; as more models add function-calling capabilities, expect additions to this schema. from langchain_core.messages import ( AIMessage, BaseMessage, FunctionMessage, HumanMessage, SystemMessage, ToolMessage,) **API Reference:**[AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) | [BaseMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.base.BaseMessage.html) | [FunctionMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.function.FunctionMessage.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [SystemMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html) | [ToolMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessage.html) ### Streaming Variant[​](#streaming-variant "Direct link to Streaming Variant") All the chat messages have a streaming variant that contains `Chunk` in the name. 
from langchain_core.messages import ( AIMessageChunk, FunctionMessageChunk, HumanMessageChunk, SystemMessageChunk, ToolMessageChunk,) **API Reference:**[AIMessageChunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) | [FunctionMessageChunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.function.FunctionMessageChunk.html) | [HumanMessageChunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessageChunk.html) | [SystemMessageChunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessageChunk.html) | [ToolMessageChunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessageChunk.html) These chunks are used when streaming output from chat models, and they all define an additive property! AIMessageChunk(content="Hello") + AIMessageChunk(content=" World!") AIMessageChunk(content='Hello World!') Base Chat Model[​](#base-chat-model "Direct link to Base Chat Model") --------------------------------------------------------------------- Let's implement a chat model that echoes back the first `n` characetrs of the last message in the prompt! To do so, we will inherit from `BaseChatModel` and we'll need to implement the following: Method/Property Description Required/Optional `_generate` Use to generate a chat result from a prompt Required `_llm_type` (property) Used to uniquely identify the type of the model. Used for logging. Required `_identifying_params` (property) Represent model parameterization for tracing purposes. Optional `_stream` Use to implement streaming. Optional `_agenerate` Use to implement a native async method. Optional `_astream` Use to implement async version of `_stream`. Optional tip The `_astream` implementation uses `run_in_executor` to launch the sync `_stream` in a separate thread if `_stream` is implemented, otherwise it fallsback to use `_agenerate`. You can use this trick if you want to reuse the `_stream` implementation, but if you're able to implement code that's natively async that's a better solution since that code will run with less overhead. ### Implementation[​](#implementation "Direct link to Implementation") from typing import Any, AsyncIterator, Dict, Iterator, List, Optionalfrom langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun,)from langchain_core.language_models import BaseChatModel, SimpleChatModelfrom langchain_core.messages import AIMessageChunk, BaseMessage, HumanMessagefrom langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResultfrom langchain_core.runnables import run_in_executorclass CustomChatModelAdvanced(BaseChatModel): """A custom chat model that echoes the first `n` characters of the input. When contributing an implementation to LangChain, carefully document the model including the initialization parameters, include an example of how to initialize the model and include any relevant links to the underlying models documentation or API. Example: .. 
code-block:: python model = CustomChatModel(n=2) result = model.invoke([HumanMessage(content="hello")]) result = model.batch([[HumanMessage(content="hello")], [HumanMessage(content="world")]]) """ model_name: str """The name of the model""" n: int """The number of characters from the last message of the prompt to be echoed.""" def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: """Override the _generate method to implement the chat model logic. This can be a call to an API, a call to a local model, or any other implementation that generates a response to the input prompt. Args: messages: the prompt composed of a list of messages. stop: a list of strings on which the model should stop generating. If generation stops due to a stop token, the stop token itself SHOULD BE INCLUDED as part of the output. This is not enforced across models right now, but it's a good practice to follow since it makes it much easier to parse the output of the model downstream and understand why generation stopped. run_manager: A run manager with callbacks for the LLM. """ # Replace this with actual logic to generate a response from a list # of messages. last_message = messages[-1] tokens = last_message.content[: self.n] message = AIMessage( content=tokens, additional_kwargs={}, # Used to add additional payload (e.g., function calling request) response_metadata={ # Use for response metadata "time_in_seconds": 3, }, ) ## generation = ChatGeneration(message=message) return ChatResult(generations=[generation]) def _stream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: """Stream the output of the model. This method should be implemented if the model can generate output in a streaming fashion. If the model does not support streaming, do not implement it. In that case streaming requests will be automatically handled by the _generate method. Args: messages: the prompt composed of a list of messages. stop: a list of strings on which the model should stop generating. If generation stops due to a stop token, the stop token itself SHOULD BE INCLUDED as part of the output. This is not enforced across models right now, but it's a good practice to follow since it makes it much easier to parse the output of the model downstream and understand why generation stopped. run_manager: A run manager with callbacks for the LLM. """ last_message = messages[-1] tokens = last_message.content[: self.n] for token in tokens: chunk = ChatGenerationChunk(message=AIMessageChunk(content=token)) if run_manager: # This is optional in newer versions of LangChain # The on_llm_new_token will be called automatically run_manager.on_llm_new_token(token, chunk=chunk) yield chunk # Let's add some other information (e.g., response metadata) chunk = ChatGenerationChunk( message=AIMessageChunk(content="", response_metadata={"time_in_sec": 3}) ) if run_manager: # This is optional in newer versions of LangChain # The on_llm_new_token will be called automatically run_manager.on_llm_new_token(token, chunk=chunk) yield chunk @property def _llm_type(self) -> str: """Get the type of language model used by this chat model.""" return "echoing-chat-model-advanced" @property def _identifying_params(self) -> Dict[str, Any]: """Return a dictionary of identifying parameters. 
This information is used by the LangChain callback system, which is used for tracing purposes make it possible to monitor LLMs. """ return { # The model name allows users to specify custom token counting # rules in LLM monitoring applications (e.g., in LangSmith users # can provide per token pricing for their model and monitor # costs for the given LLM.) "model_name": self.model_name, } **API Reference:**[AsyncCallbackManagerForLLMRun](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.AsyncCallbackManagerForLLMRun.html) | [CallbackManagerForLLMRun](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.CallbackManagerForLLMRun.html) | [BaseChatModel](https://api.python.langchain.com/en/latest/language_models/langchain_core.language_models.chat_models.BaseChatModel.html) | [SimpleChatModel](https://api.python.langchain.com/en/latest/language_models/langchain_core.language_models.chat_models.SimpleChatModel.html) | [AIMessageChunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) | [BaseMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.base.BaseMessage.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [ChatGeneration](https://api.python.langchain.com/en/latest/outputs/langchain_core.outputs.chat_generation.ChatGeneration.html) | [ChatGenerationChunk](https://api.python.langchain.com/en/latest/outputs/langchain_core.outputs.chat_generation.ChatGenerationChunk.html) | [ChatResult](https://api.python.langchain.com/en/latest/outputs/langchain_core.outputs.chat_result.ChatResult.html) | [run\_in\_executor](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.config.run_in_executor.html) ### Let's test it 🧪[​](#lets-test-it- "Direct link to Let's test it 🧪") The chat model will implement the standard `Runnable` interface of LangChain which many of the LangChain abstractions support! model = CustomChatModelAdvanced(n=3, model_name="my_custom_model")model.invoke( [ HumanMessage(content="hello!"), AIMessage(content="Hi there human!"), HumanMessage(content="Meow!"), ]) AIMessage(content='Meo', response_metadata={'time_in_seconds': 3}, id='run-ddb42bd6-4fdd-4bd2-8be5-e11b67d3ac29-0') model.invoke("hello") AIMessage(content='hel', response_metadata={'time_in_seconds': 3}, id='run-4d3cc912-44aa-454b-977b-ca02be06c12e-0') model.batch(["hello", "goodbye"]) [AIMessage(content='hel', response_metadata={'time_in_seconds': 3}, id='run-9620e228-1912-4582-8aa1-176813afec49-0'), AIMessage(content='goo', response_metadata={'time_in_seconds': 3}, id='run-1ce8cdf8-6f75-448e-82f7-1bb4a121df93-0')] for chunk in model.stream("cat"): print(chunk.content, end="|") c|a|t|| Please see the implementation of `_astream` in the model! If you do not implement it, then no output will stream.! async for chunk in model.astream("cat"): print(chunk.content, end="|") c|a|t|| Let's try to use the astream events API which will also help double check that all the callbacks were implemented! 
async for event in model.astream_events("cat", version="v1"): print(event) {'event': 'on_chat_model_start', 'run_id': '125a2a16-b9cd-40de-aa08-8aa9180b07d0', 'name': 'CustomChatModelAdvanced', 'tags': [], 'metadata': {}, 'data': {'input': 'cat'}}{'event': 'on_chat_model_stream', 'run_id': '125a2a16-b9cd-40de-aa08-8aa9180b07d0', 'tags': [], 'metadata': {}, 'name': 'CustomChatModelAdvanced', 'data': {'chunk': AIMessageChunk(content='c', id='run-125a2a16-b9cd-40de-aa08-8aa9180b07d0')}}{'event': 'on_chat_model_stream', 'run_id': '125a2a16-b9cd-40de-aa08-8aa9180b07d0', 'tags': [], 'metadata': {}, 'name': 'CustomChatModelAdvanced', 'data': {'chunk': AIMessageChunk(content='a', id='run-125a2a16-b9cd-40de-aa08-8aa9180b07d0')}}{'event': 'on_chat_model_stream', 'run_id': '125a2a16-b9cd-40de-aa08-8aa9180b07d0', 'tags': [], 'metadata': {}, 'name': 'CustomChatModelAdvanced', 'data': {'chunk': AIMessageChunk(content='t', id='run-125a2a16-b9cd-40de-aa08-8aa9180b07d0')}}{'event': 'on_chat_model_stream', 'run_id': '125a2a16-b9cd-40de-aa08-8aa9180b07d0', 'tags': [], 'metadata': {}, 'name': 'CustomChatModelAdvanced', 'data': {'chunk': AIMessageChunk(content='', response_metadata={'time_in_sec': 3}, id='run-125a2a16-b9cd-40de-aa08-8aa9180b07d0')}}{'event': 'on_chat_model_end', 'name': 'CustomChatModelAdvanced', 'run_id': '125a2a16-b9cd-40de-aa08-8aa9180b07d0', 'tags': [], 'metadata': {}, 'data': {'output': AIMessageChunk(content='cat', response_metadata={'time_in_sec': 3}, id='run-125a2a16-b9cd-40de-aa08-8aa9180b07d0')}}``````output/home/eugene/src/langchain/libs/core/langchain_core/_api/beta_decorator.py:87: LangChainBetaWarning: This API is in beta and may change in the future. warn_beta( Contributing[​](#contributing "Direct link to Contributing") ------------------------------------------------------------ We appreciate all chat model integration contributions. Here's a checklist to help make sure your contribution gets added to LangChain: Documentation: * The model contains doc-strings for all initialization arguments, as these will be surfaced in the [APIReference](https://api.python.langchain.com/en/stable/langchain_api_reference.html). * The class doc-string for the model contains a link to the model API if the model is powered by a service. Tests: * Add unit or integration tests to the overridden methods. Verify that `invoke`, `ainvoke`, `batch`, `stream` work if you've over-ridden the corresponding code. Streaming (if you're implementing it): * Implement the \_stream method to get streaming working Stop Token Behavior: * Stop token should be respected * Stop token should be INCLUDED as part of the response Secret API Keys: * If your model connects to an API it will likely accept API keys as part of its initialization. Use Pydantic's `SecretStr` type for secrets, so they don't get accidentally printed out when folks print the model. Identifying Params: * Include a `model_name` in identifying params Optimizations: Consider providing native async support to reduce the overhead from the model! * Provided a native async of `_agenerate` (used by `ainvoke`) * Provided a native async of `_astream` (used by `astream`) Next steps[​](#next-steps "Direct link to Next steps") ------------------------------------------------------ You've now learned how to create your own custom chat models. 
Next, check out the other chat model how-to guides in this section, like [how to get a model to return structured output](/v0.2/docs/how_to/structured_output/) or [how to track chat model token usage](/v0.2/docs/how_to/chat_token_usage_tracking/).
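As an optional follow-up to the Optimizations item in the checklist above, here is a minimal sketch of a native async `_astream` for the echo model from this guide. The subclass name and the `asyncio.sleep` placeholder are illustrative only; a real integration would await its own async client.

```python
import asyncio
from typing import Any, AsyncIterator, List, Optional

from langchain_core.callbacks import AsyncCallbackManagerForLLMRun
from langchain_core.messages import AIMessageChunk, BaseMessage
from langchain_core.outputs import ChatGenerationChunk


class NativeAsyncEchoChatModel(CustomChatModelAdvanced):
    """Hypothetical subclass adding a native async streaming implementation."""

    async def _astream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        last_message = messages[-1]
        for token in last_message.content[: self.n]:
            await asyncio.sleep(0)  # stand-in for awaiting a real async client
            chunk = ChatGenerationChunk(message=AIMessageChunk(content=token))
            if run_manager:
                await run_manager.on_llm_new_token(token, chunk=chunk)
            yield chunk


# Usage sketch:
# model = NativeAsyncEchoChatModel(n=3, model_name="my_custom_model")
# async for chunk in model.astream("cat"):
#     print(chunk.content, end="|")
```

With a native `_astream` in place, `astream` and `astream_events` should no longer need the `run_in_executor` fallback described in the tip earlier on this page.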
https://python.langchain.com/v0.2/docs/how_to/custom_callbacks/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to create custom callback handlers On this page How to create custom callback handlers ====================================== Prerequisites This guide assumes familiarity with the following concepts: * [Callbacks](/v0.2/docs/concepts/#callbacks) LangChain has some built-in callback handlers, but you will often want to create your own handlers with custom logic. To create a custom callback handler, we need to determine the [event(s)](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) we want our callback handler to handle as well as what we want our callback handler to do when the event is triggered. Then all we need to do is attach the callback handler to the object, for example via [the constructor](/v0.2/docs/how_to/callbacks_constructor/) or [at runtime](/v0.2/docs/how_to/callbacks_runtime/). In the example below, we'll implement streaming with a custom handler. In our custom callback handler `MyCustomHandler`, we implement the `on_llm_new_token` handler to print the token we have just received. We then attach our custom handler to the model object as a constructor callback. from langchain_anthropic import ChatAnthropicfrom langchain_core.callbacks import BaseCallbackHandlerfrom langchain_core.prompts import ChatPromptTemplateclass MyCustomHandler(BaseCallbackHandler): def on_llm_new_token(self, token: str, **kwargs) -> None: print(f"My custom handler, token: {token}")prompt = ChatPromptTemplate.from_messages(["Tell me a joke about {animal}"])# To enable streaming, we pass in `streaming=True` to the ChatModel constructor# Additionally, we pass in our custom handler as a list to the callbacks parametermodel = ChatAnthropic( model="claude-3-sonnet-20240229", streaming=True, callbacks=[MyCustomHandler()])chain = prompt | modelresponse = chain.invoke({"animal": "bears"}) **API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) | [BaseCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) My custom handler, token: HereMy custom handler, token: 'sMy custom handler, token: aMy custom handler, token: bearMy custom handler, token: jokeMy custom handler, token: forMy custom handler, token: youMy custom handler, token: :My custom handler, token: WhyMy custom handler, token: diMy custom handler, token: d theMy custom handler, token: bearMy custom handler, token: dissolMy custom handler, token: veMy custom handler, token: inMy custom handler, token: waterMy custom handler, token: ?My custom handler, token: BecauseMy custom handler, token: itMy custom handler, token: wasMy custom handler, token: aMy custom handler, token: polarMy custom handler, token: bearMy custom handler, token: ! You can see [this reference page](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) for a list of events you can handle. Note that the `handle_chain_*` events run for most LCEL runnables. Next steps[​](#next-steps "Direct link to Next steps") ------------------------------------------------------ You've now learned how to create your own custom callback handlers. 
Next, check out the other how-to guides in this section, such as [how to attach callbacks to a runnable](/v0.2/docs/how_to/callbacks_attach/).
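For completeness, here is a minimal sketch of the runtime option mentioned at the start of this guide (attaching the handler at invocation time rather than on the constructor), reusing `prompt` and `MyCustomHandler` from the example above; the model is re-created here without constructor callbacks.

```python
from langchain_anthropic import ChatAnthropic

# Fresh model instance with no constructor callbacks; the handler is attached
# for this one call via the `config` argument instead.
model = ChatAnthropic(model="claude-3-sonnet-20240229", streaming=True)
chain = prompt | model

response = chain.invoke(
    {"animal": "bears"},
    config={"callbacks": [MyCustomHandler()]},
)
```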
https://python.langchain.com/v0.2/docs/how_to/custom_llm/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to create a custom LLM class On this page How to create a custom LLM class ================================ This notebook goes over how to create a custom LLM wrapper, in case you want to use your own LLM or a different wrapper than one that is supported in LangChain. Wrapping your LLM with the standard `LLM` interface allow you to use your LLM in existing LangChain programs with minimal code modifications! As an bonus, your LLM will automatically become a LangChain `Runnable` and will benefit from some optimizations out of the box, async support, the `astream_events` API, etc. Implementation[​](#implementation "Direct link to Implementation") ------------------------------------------------------------------ There are only two required things that a custom LLM needs to implement: Method Description `_call` Takes in a string and some optional stop words, and returns a string. Used by `invoke`. `_llm_type` A property that returns a string, used for logging purposes only. Optional implementations: Method Description `_identifying_params` Used to help with identifying the model and printing the LLM; should return a dictionary. This is a **@property**. `_acall` Provides an async native implementation of `_call`, used by `ainvoke`. `_stream` Method to stream the output token by token. `_astream` Provides an async native implementation of `_stream`; in newer LangChain versions, defaults to `_stream`. Let's implement a simple custom LLM that just returns the first n characters of the input. from typing import Any, Dict, Iterator, List, Mapping, Optionalfrom langchain_core.callbacks.manager import CallbackManagerForLLMRunfrom langchain_core.language_models.llms import LLMfrom langchain_core.outputs import GenerationChunkclass CustomLLM(LLM): """A custom chat model that echoes the first `n` characters of the input. When contributing an implementation to LangChain, carefully document the model including the initialization parameters, include an example of how to initialize the model and include any relevant links to the underlying models documentation or API. Example: .. code-block:: python model = CustomChatModel(n=2) result = model.invoke([HumanMessage(content="hello")]) result = model.batch([[HumanMessage(content="hello")], [HumanMessage(content="world")]]) """ n: int """The number of characters from the last message of the prompt to be echoed.""" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Run the LLM on the given input. Override this method to implement the LLM logic. Args: prompt: The prompt to generate from. stop: Stop words to use when generating. Model output is cut off at the first occurrence of any of the stop substrings. If stop tokens are not supported consider raising NotImplementedError. run_manager: Callback manager for the run. **kwargs: Arbitrary additional keyword arguments. These are usually passed to the model provider API call. Returns: The model output as a string. Actual completions SHOULD NOT include the prompt. """ if stop is not None: raise ValueError("stop kwargs are not permitted.") return prompt[: self.n] def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: """Stream the LLM on the given prompt. This method should be overridden by subclasses that support streaming. 
If not implemented, the default behavior of calls to stream will be to fallback to the non-streaming version of the model and return the output as a single chunk. Args: prompt: The prompt to generate from. stop: Stop words to use when generating. Model output is cut off at the first occurrence of any of these substrings. run_manager: Callback manager for the run. **kwargs: Arbitrary additional keyword arguments. These are usually passed to the model provider API call. Returns: An iterator of GenerationChunks. """ for char in prompt[: self.n]: chunk = GenerationChunk(text=char) if run_manager: run_manager.on_llm_new_token(chunk.text, chunk=chunk) yield chunk @property def _identifying_params(self) -> Dict[str, Any]: """Return a dictionary of identifying parameters.""" return { # The model name allows users to specify custom token counting # rules in LLM monitoring applications (e.g., in LangSmith users # can provide per token pricing for their model and monitor # costs for the given LLM.) "model_name": "CustomChatModel", } @property def _llm_type(self) -> str: """Get the type of language model used by this chat model. Used for logging purposes only.""" return "custom" **API Reference:**[CallbackManagerForLLMRun](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.CallbackManagerForLLMRun.html) | [LLM](https://api.python.langchain.com/en/latest/language_models/langchain_core.language_models.llms.LLM.html) | [GenerationChunk](https://api.python.langchain.com/en/latest/outputs/langchain_core.outputs.generation.GenerationChunk.html) ### Let's test it 🧪[​](#lets-test-it- "Direct link to Let's test it 🧪") This LLM will implement the standard `Runnable` interface of LangChain which many of the LangChain abstractions support! llm = CustomLLM(n=5)print(llm) CustomLLMParams: {'model_name': 'CustomChatModel'} llm.invoke("This is a foobar thing") 'This ' await llm.ainvoke("world") 'world' llm.batch(["woof woof woof", "meow meow meow"]) ['woof ', 'meow '] await llm.abatch(["woof woof woof", "meow meow meow"]) ['woof ', 'meow '] async for token in llm.astream("hello"): print(token, end="|", flush=True) h|e|l|l|o| Let's confirm that in integrates nicely with other `LangChain` APIs. 
from langchain_core.prompts import ChatPromptTemplate **API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) prompt = ChatPromptTemplate.from_messages( [("system", "you are a bot"), ("human", "{input}")]) llm = CustomLLM(n=7)chain = prompt | llm idx = 0async for event in chain.astream_events({"input": "hello there!"}, version="v1"): print(event) idx += 1 if idx > 7: # Truncate break {'event': 'on_chain_start', 'run_id': '05f24b4f-7ea3-4fb6-8417-3aa21633462f', 'name': 'RunnableSequence', 'tags': [], 'metadata': {}, 'data': {'input': {'input': 'hello there!'}}}{'event': 'on_prompt_start', 'name': 'ChatPromptTemplate', 'run_id': '7e996251-a926-4344-809e-c425a9846d21', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'input': {'input': 'hello there!'}}}{'event': 'on_prompt_end', 'name': 'ChatPromptTemplate', 'run_id': '7e996251-a926-4344-809e-c425a9846d21', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'input': {'input': 'hello there!'}, 'output': ChatPromptValue(messages=[SystemMessage(content='you are a bot'), HumanMessage(content='hello there!')])}}{'event': 'on_llm_start', 'name': 'CustomLLM', 'run_id': 'a8766beb-10f4-41de-8750-3ea7cf0ca7e2', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'input': {'prompts': ['System: you are a bot\nHuman: hello there!']}}}{'event': 'on_llm_stream', 'name': 'CustomLLM', 'run_id': 'a8766beb-10f4-41de-8750-3ea7cf0ca7e2', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': 'S'}}{'event': 'on_chain_stream', 'run_id': '05f24b4f-7ea3-4fb6-8417-3aa21633462f', 'tags': [], 'metadata': {}, 'name': 'RunnableSequence', 'data': {'chunk': 'S'}}{'event': 'on_llm_stream', 'name': 'CustomLLM', 'run_id': 'a8766beb-10f4-41de-8750-3ea7cf0ca7e2', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': 'y'}}{'event': 'on_chain_stream', 'run_id': '05f24b4f-7ea3-4fb6-8417-3aa21633462f', 'tags': [], 'metadata': {}, 'name': 'RunnableSequence', 'data': {'chunk': 'y'}} Contributing[​](#contributing "Direct link to Contributing") ------------------------------------------------------------ We appreciate all chat model integration contributions. Here's a checklist to help make sure your contribution gets added to LangChain: Documentation: * The model contains doc-strings for all initialization arguments, as these will be surfaced in the [APIReference](https://api.python.langchain.com/en/stable/langchain_api_reference.html). * The class doc-string for the model contains a link to the model API if the model is powered by a service. Tests: * Add unit or integration tests to the overridden methods. Verify that `invoke`, `ainvoke`, `batch`, `stream` work if you've over-ridden the corresponding code. Streaming (if you're implementing it): * Make sure to invoke the `on_llm_new_token` callback * `on_llm_new_token` is invoked BEFORE yielding the chunk Stop Token Behavior: * Stop token should be respected * Stop token should be INCLUDED as part of the response Secret API Keys: * If your model connects to an API it will likely accept API keys as part of its initialization. Use Pydantic's `SecretStr` type for secrets, so they don't get accidentally printed out when folks print the model. [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/custom_llm.ipynb) * * * #### Was this page helpful? 
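To round out the table of optional methods above, here is a minimal sketch of a native async `_acall`, written as a hypothetical subclass of the `CustomLLM` defined in this guide. The echo logic itself needs no awaiting, so the sketch mainly shows where an async client call would go.

```python
from typing import Any, List, Optional

from langchain_core.callbacks.manager import AsyncCallbackManagerForLLMRun


class AsyncCustomLLM(CustomLLM):
    """Hypothetical subclass adding a native async implementation of `_call`."""

    async def _acall(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        # A real integration would await an async HTTP client here.
        if stop is not None:
            raise ValueError("stop kwargs are not permitted.")
        return prompt[: self.n]


# Usage sketch:
# llm = AsyncCustomLLM(n=5)
# await llm.ainvoke("This is a foobar thing")  # -> 'This '
```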
https://python.langchain.com/v0.2/docs/how_to/MultiQueryRetriever/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to use the MultiQueryRetriever On this page How to use the MultiQueryRetriever ================================== Distance-based vector database retrieval embeds (represents) queries in high-dimensional space and finds similar embedded documents based on a distance metric. But, retrieval may produce different results with subtle changes in query wording, or if the embeddings do not capture the semantics of the data well. Prompt engineering / tuning is sometimes done to manually address these problems, but can be tedious. The [MultiQueryRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.multi_query.MultiQueryRetriever.html) automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query. For each query, it retrieves a set of relevant documents and takes the unique union across all queries to get a larger set of potentially relevant documents. By generating multiple perspectives on the same question, the `MultiQueryRetriever` can mitigate some of the limitations of the distance-based retrieval and get a richer set of results. Let's build a vectorstore using the [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng from the [RAG tutorial](/v0.2/docs/tutorials/rag/): # Build a sample vectorDBfrom langchain_chroma import Chromafrom langchain_community.document_loaders import WebBaseLoaderfrom langchain_openai import OpenAIEmbeddingsfrom langchain_text_splitters import RecursiveCharacterTextSplitter# Load blog postloader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")data = loader.load()# Splittext_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)splits = text_splitter.split_documents(data)# VectorDBembedding = OpenAIEmbeddings()vectordb = Chroma.from_documents(documents=splits, embedding=embedding) **API Reference:**[WebBaseLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.web_base.WebBaseLoader.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) | [RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html) #### Simple usage[​](#simple-usage "Direct link to Simple usage") Specify the LLM to use for query generation, and the retriever will do the rest. from langchain.retrievers.multi_query import MultiQueryRetrieverfrom langchain_openai import ChatOpenAIquestion = "What are the approaches to Task Decomposition?"llm = ChatOpenAI(temperature=0)retriever_from_llm = MultiQueryRetriever.from_llm( retriever=vectordb.as_retriever(), llm=llm) **API Reference:**[MultiQueryRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.multi_query.MultiQueryRetriever.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) # Set logging for the queriesimport logginglogging.basicConfig()logging.getLogger("langchain.retrievers.multi_query").setLevel(logging.INFO) unique_docs = retriever_from_llm.invoke(question)len(unique_docs) INFO:langchain.retrievers.multi_query:Generated queries: ['1. How can Task Decomposition be achieved through different methods?', '2. 
What strategies are commonly used for Task Decomposition?', '3. What are the various techniques for breaking down tasks in Task Decomposition?'] 5 Note that the underlying queries generated by the retriever are logged at the `INFO` level. #### Supplying your own prompt[​](#supplying-your-own-prompt "Direct link to Supplying your own prompt") Under the hood, `MultiQueryRetriever` generates queries using a specific [prompt](https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/multi_query.html#MultiQueryRetriever). To customize this prompt: 1. Make a [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) with an input variable for the question; 2. Implement an [output parser](/v0.2/docs/concepts/#output-parsers) like the one below to split the result into a list of queries. The prompt and output parser together must support the generation of a list of queries. from typing import Listfrom langchain_core.output_parsers import BaseOutputParserfrom langchain_core.prompts import PromptTemplatefrom langchain_core.pydantic_v1 import BaseModel, Field# Output parser will split the LLM result into a list of queriesclass LineListOutputParser(BaseOutputParser[List[str]]): """Output parser for a list of lines.""" def parse(self, text: str) -> List[str]: lines = text.strip().split("\n") return linesoutput_parser = LineListOutputParser()QUERY_PROMPT = PromptTemplate( input_variables=["question"], template="""You are an AI language model assistant. Your task is to generate five different versions of the given user question to retrieve relevant documents from a vector database. By generating multiple perspectives on the user question, your goal is to help the user overcome some of the limitations of the distance-based similarity search. Provide these alternative questions separated by newlines. Original question: {question}""",)llm = ChatOpenAI(temperature=0)# Chainllm_chain = QUERY_PROMPT | llm | output_parser# Other inputsquestion = "What are the approaches to Task Decomposition?" **API Reference:**[BaseOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.base.BaseOutputParser.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) # Runretriever = MultiQueryRetriever( retriever=vectordb.as_retriever(), llm_chain=llm_chain, parser_key="lines") # "lines" is the key (attribute name) of the parsed output# Resultsunique_docs = retriever.invoke("What does the course say about regression?")len(unique_docs) INFO:langchain.retrievers.multi_query:Generated queries: ['1. Can you provide insights on regression from the course material?', '2. How is regression discussed in the course content?', '3. What information does the course offer about regression?', '4. In what way is regression covered in the course?', '5. What are the teachings of the course regarding regression?'] 9 [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/MultiQueryRetriever.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). 
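As a quick sanity check on the custom prompt, the query-generation chain can also be invoked on its own, reusing `llm_chain` and `question` from the section above; this small sketch (not part of the original guide) prints the alternative queries before any retrieval happens.

```python
# Run only the query-generation step and inspect the rewritten queries.
generated_queries = llm_chain.invoke({"question": question})
for q in generated_queries:
    print(q)
```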
https://python.langchain.com/v0.2/docs/how_to/custom_retriever/
How to create a custom Retriever ================================ Overview[​](#overview "Direct link to Overview") ------------------------------------------------ Many LLM applications involve retrieving information from external data sources using a `Retriever`. A retriever is responsible for returning a list of `Documents` relevant to a given user `query`. The retrieved documents are often formatted into prompts that are fed into an LLM, allowing the LLM to use the information in them to generate an appropriate response (e.g., answering a user question based on a knowledge base). Interface[​](#interface "Direct link to Interface") --------------------------------------------------- To create your own retriever, you need to extend the `BaseRetriever` class and implement the following methods: Method Description Required/Optional `_get_relevant_documents` Get documents relevant to a query. Required `_aget_relevant_documents` Implement to provide async native support. Optional The logic inside of `_get_relevant_documents` can involve arbitrary calls to a database or to the web using requests. tip By inheriting from `BaseRetriever`, your retriever automatically becomes a LangChain [Runnable](/v0.2/docs/concepts/#interface) and will gain the standard `Runnable` functionality out of the box! info You can use a `RunnableLambda` or `RunnableGenerator` to implement a retriever. The main benefit of implementing a retriever as a `BaseRetriever` vs. a `RunnableLambda` (a custom [runnable function](/v0.2/docs/how_to/functions/)) is that a `BaseRetriever` is a well-known LangChain entity, so some tooling for monitoring may implement specialized behavior for retrievers. Another difference is that a `BaseRetriever` will behave slightly differently from `RunnableLambda` in some APIs; e.g., the `start` event in the `astream_events` API will be `on_retriever_start` instead of `on_chain_start`. Example[​](#example "Direct link to Example") --------------------------------------------- Let's implement a toy retriever that returns all documents whose text contains the text in the user query. from typing import Listfrom langchain_core.callbacks import CallbackManagerForRetrieverRunfrom langchain_core.documents import Documentfrom langchain_core.retrievers import BaseRetrieverclass ToyRetriever(BaseRetriever): """A toy retriever that contains the top k documents that contain the user query. This retriever only implements the sync method _get_relevant_documents. If the retriever were to involve file access or network access, it could benefit from a native async implementation of `_aget_relevant_documents`. As usual with Runnables, a default async implementation is provided that delegates to the sync implementation running on another thread. 
""" documents: List[Document] """List of documents to retrieve from.""" k: int """Number of top results to return""" def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: """Sync implementations for retriever.""" matching_documents = [] for document in documents: if len(matching_documents) > self.k: return matching_documents if query.lower() in document.page_content.lower(): matching_documents.append(document) return matching_documents # Optional: Provide a more efficient native implementation by overriding # _aget_relevant_documents # async def _aget_relevant_documents( # self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun # ) -> List[Document]: # """Asynchronously get documents relevant to a query. # Args: # query: String to find relevant documents for # run_manager: The callbacks handler to use # Returns: # List of relevant documents # """ **API Reference:**[CallbackManagerForRetrieverRun](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.CallbackManagerForRetrieverRun.html) | [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) | [BaseRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain_core.retrievers.BaseRetriever.html) Test it 🧪[​](#test-it- "Direct link to Test it 🧪") ---------------------------------------------------- documents = [ Document( page_content="Dogs are great companions, known for their loyalty and friendliness.", metadata={"type": "dog", "trait": "loyalty"}, ), Document( page_content="Cats are independent pets that often enjoy their own space.", metadata={"type": "cat", "trait": "independence"}, ), Document( page_content="Goldfish are popular pets for beginners, requiring relatively simple care.", metadata={"type": "fish", "trait": "low maintenance"}, ), Document( page_content="Parrots are intelligent birds capable of mimicking human speech.", metadata={"type": "bird", "trait": "intelligence"}, ), Document( page_content="Rabbits are social animals that need plenty of space to hop around.", metadata={"type": "rabbit", "trait": "social"}, ),]retriever = ToyRetriever(documents=documents, k=3) retriever.invoke("that") [Document(page_content='Cats are independent pets that often enjoy their own space.', metadata={'type': 'cat', 'trait': 'independence'}), Document(page_content='Rabbits are social animals that need plenty of space to hop around.', metadata={'type': 'rabbit', 'trait': 'social'})] It's a **runnable** so it'll benefit from the standard Runnable Interface! 
🤩 await retriever.ainvoke("that") [Document(page_content='Cats are independent pets that often enjoy their own space.', metadata={'type': 'cat', 'trait': 'independence'}), Document(page_content='Rabbits are social animals that need plenty of space to hop around.', metadata={'type': 'rabbit', 'trait': 'social'})] retriever.batch(["dog", "cat"]) [[Document(page_content='Dogs are great companions, known for their loyalty and friendliness.', metadata={'type': 'dog', 'trait': 'loyalty'})], [Document(page_content='Cats are independent pets that often enjoy their own space.', metadata={'type': 'cat', 'trait': 'independence'})]] async for event in retriever.astream_events("bar", version="v1"): print(event) {'event': 'on_retriever_start', 'run_id': 'f96f268d-8383-4921-b175-ca583924d9ff', 'name': 'ToyRetriever', 'tags': [], 'metadata': {}, 'data': {'input': 'bar'}}{'event': 'on_retriever_stream', 'run_id': 'f96f268d-8383-4921-b175-ca583924d9ff', 'tags': [], 'metadata': {}, 'name': 'ToyRetriever', 'data': {'chunk': []}}{'event': 'on_retriever_end', 'name': 'ToyRetriever', 'run_id': 'f96f268d-8383-4921-b175-ca583924d9ff', 'tags': [], 'metadata': {}, 'data': {'output': []}} Contributing[​](#contributing "Direct link to Contributing") ------------------------------------------------------------ We appreciate contributions of interesting retrievers! Here's a checklist to help make sure your contribution gets added to LangChain: Documentation: * The retriever contains doc-strings for all initialization arguments, as these will be surfaced in the [API Reference](https://api.python.langchain.com/en/stable/langchain_api_reference.html). * The class doc-string for the model contains a link to any relevant APIs used for the retriever (e.g., if the retriever is retrieving from wikipedia, it'll be good to link to the wikipedia API!) Tests: * Add unit or integration tests to verify that `invoke` and `ainvoke` work. Optimizations: If the retriever is connecting to external data sources (e.g., an API or a file), it'll almost certainly benefit from an async native optimization! * Provide a native async implementation of `_aget_relevant_documents` (used by `ainvoke`) [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/custom_retriever.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to create a custom LLM class ](/v0.2/docs/how_to/custom_llm/)[ Next How to create custom tools ](/v0.2/docs/how_to/custom_tools/) * [Overview](#overview) * [Interface](#interface) * [Example](#example) * [Test it 🧪](#test-it-) * [Contributing](#contributing)
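Following the commented-out stub and the Optimizations note above, here is a minimal sketch of what a native `_aget_relevant_documents` could look like, written as a hypothetical subclass. The toy matching logic is pure CPU work, so the real benefit only appears once an async client call (HTTP, database, ...) replaces it.

```python
from typing import List

from langchain_core.callbacks import AsyncCallbackManagerForRetrieverRun
from langchain_core.documents import Document


class AsyncToyRetriever(ToyRetriever):
    """Hypothetical subclass adding the optional native async implementation."""

    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> List[Document]:
        # Reuse the sync matching logic; a real retriever would await an async
        # client here instead of iterating over an in-memory list.
        matching_documents = []
        for document in self.documents:
            if len(matching_documents) > self.k:
                return matching_documents
            if query.lower() in document.page_content.lower():
                matching_documents.append(document)
        return matching_documents


# Usage sketch:
# retriever = AsyncToyRetriever(documents=documents, k=3)
# await retriever.ainvoke("that")
```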
https://python.langchain.com/v0.2/docs/how_to/add_scores_retriever/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to add scores to retriever results On this page How to add scores to retriever results ====================================== Retrievers will return sequences of [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) objects, which by default include no information about the process that retrieved them (e.g., a similarity score against a query). Here we demonstrate how to add retrieval scores to the `.metadata` of documents: 1. From [vectorstore retrievers](/v0.2/docs/how_to/vectorstore_retriever/); 2. From higher-order LangChain retrievers, such as [SelfQueryRetriever](/v0.2/docs/how_to/self_query/) or [MultiVectorRetriever](/v0.2/docs/how_to/multi_vector/). For (1), we will implement a short wrapper function around the corresponding vector store. For (2), we will update a method of the corresponding class. Create vector store[​](#create-vector-store "Direct link to Create vector store") --------------------------------------------------------------------------------- First we populate a vector store with some data. We will use a [PineconeVectorStore](https://api.python.langchain.com/en/latest/vectorstores/langchain_pinecone.vectorstores.PineconeVectorStore.html), but this guide is compatible with any LangChain vector store that implements a `.similarity_search_with_score` method. from langchain_core.documents import Documentfrom langchain_openai import OpenAIEmbeddingsfrom langchain_pinecone import PineconeVectorStoredocs = [ Document( page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose", metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"}, ), Document( page_content="Leo DiCaprio gets lost in a dream within a dream within a dream within a ...", metadata={"year": 2010, "director": "Christopher Nolan", "rating": 8.2}, ), Document( page_content="A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea", metadata={"year": 2006, "director": "Satoshi Kon", "rating": 8.6}, ), Document( page_content="A bunch of normal-sized women are supremely wholesome and some men pine after them", metadata={"year": 2019, "director": "Greta Gerwig", "rating": 8.3}, ), Document( page_content="Toys come alive and have a blast doing so", metadata={"year": 1995, "genre": "animated"}, ), Document( page_content="Three men walk into the Zone, three men walk out of the Zone", metadata={ "year": 1979, "director": "Andrei Tarkovsky", "genre": "thriller", "rating": 9.9, }, ),]vectorstore = PineconeVectorStore.from_documents( docs, index_name="sample", embedding=OpenAIEmbeddings()) **API Reference:**[Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) | [PineconeVectorStore](https://api.python.langchain.com/en/latest/vectorstores/langchain_pinecone.vectorstores.PineconeVectorStore.html) Retriever[​](#retriever "Direct link to Retriever") --------------------------------------------------- To obtain scores from a vector store retriever, we wrap the underlying vector store's `.similarity_search_with_score` method in a short function that packages scores into the associated document's metadata. 
We add a `@chain` decorator to the function to create a [Runnable](/v0.2/docs/concepts/#langchain-expression-language) that can be used similarly to a typical retriever. from typing import Listfrom langchain_core.documents import Documentfrom langchain_core.runnables import chain@chaindef retriever(query: str) -> List[Document]: docs, scores = zip(*vectorstore.similarity_search_with_score(query)) for doc, score in zip(docs, scores): doc.metadata["score"] = score return docs **API Reference:**[Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) | [chain](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.chain.html) result = retriever.invoke("dinosaur")result (Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'genre': 'science fiction', 'rating': 7.7, 'year': 1993.0, 'score': 0.84429127}), Document(page_content='Toys come alive and have a blast doing so', metadata={'genre': 'animated', 'year': 1995.0, 'score': 0.792038262}), Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'director': 'Andrei Tarkovsky', 'genre': 'thriller', 'rating': 9.9, 'year': 1979.0, 'score': 0.751571238}), Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'director': 'Satoshi Kon', 'rating': 8.6, 'year': 2006.0, 'score': 0.747471571})) Note that similarity scores from the retrieval step are included in the metadata of the above documents. SelfQueryRetriever[​](#selfqueryretriever "Direct link to SelfQueryRetriever") ------------------------------------------------------------------------------ `SelfQueryRetriever` will use a LLM to generate a query that is potentially structured-- for example, it can construct filters for the retrieval on top of the usual semantic-similarity driven selection. See [this guide](/v0.2/docs/how_to/self_query/) for more detail. `SelfQueryRetriever` includes a short (1 - 2 line) method `_get_docs_with_query` that executes the `vectorstore` search. We can subclass `SelfQueryRetriever` and override this method to propagate similarity scores. First, following the [how-to guide](/v0.2/docs/how_to/self_query/), we will need to establish some metadata on which to filter: from langchain.chains.query_constructor.base import AttributeInfofrom langchain.retrievers.self_query.base import SelfQueryRetrieverfrom langchain_openai import ChatOpenAImetadata_field_info = [ AttributeInfo( name="genre", description="The genre of the movie. 
One of ['science fiction', 'comedy', 'drama', 'thriller', 'romance', 'action', 'animated']", type="string", ), AttributeInfo( name="year", description="The year the movie was released", type="integer", ), AttributeInfo( name="director", description="The name of the movie director", type="string", ), AttributeInfo( name="rating", description="A 1-10 rating for the movie", type="float" ),]document_content_description = "Brief summary of a movie"llm = ChatOpenAI(temperature=0) **API Reference:**[AttributeInfo](https://api.python.langchain.com/en/latest/chains/langchain.chains.query_constructor.schema.AttributeInfo.html) | [SelfQueryRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.self_query.base.SelfQueryRetriever.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) We then override the `_get_docs_with_query` to use the `similarity_search_with_score` method of the underlying vector store: from typing import Any, Dictclass CustomSelfQueryRetriever(SelfQueryRetriever): def _get_docs_with_query( self, query: str, search_kwargs: Dict[str, Any] ) -> List[Document]: """Get docs, adding score information.""" docs, scores = zip( *vectorstore.similarity_search_with_score(query, **search_kwargs) ) for doc, score in zip(docs, scores): doc.metadata["score"] = score return docs Invoking this retriever will now include similarity scores in the document metadata. Note that the underlying structured-query capabilities of `SelfQueryRetriever` are retained. retriever = CustomSelfQueryRetriever.from_llm( llm, vectorstore, document_content_description, metadata_field_info,)result = retriever.invoke("dinosaur movie with rating less than 8")result (Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'genre': 'science fiction', 'rating': 7.7, 'year': 1993.0, 'score': 0.84429127}),) MultiVectorRetriever[​](#multivectorretriever "Direct link to MultiVectorRetriever") ------------------------------------------------------------------------------------ `MultiVectorRetriever` allows you to associate multiple vectors with a single document. This can be useful in a number of applications. For example, we can index small chunks of a larger document and run the retrieval on the chunks, but return the larger "parent" document when invoking the retriever. [ParentDocumentRetriever](/v0.2/docs/how_to/parent_document_retriever/), a subclass of `MultiVectorRetriever`, includes convenience methods for populating a vector store to support this. Further applications are detailed in this [how-to guide](/v0.2/docs/how_to/multi_vector/). To propagate similarity scores through this retriever, we can again subclass `MultiVectorRetriever` and override a method. This time we will override `_get_relevant_documents`. First, we prepare some fake data. We generate fake "whole documents" and store them in a document store; here we will use a simple [InMemoryStore](https://api.python.langchain.com/en/latest/stores/langchain_core.stores.InMemoryBaseStore.html). 
from langchain.storage import InMemoryStorefrom langchain_text_splitters import RecursiveCharacterTextSplitter# The storage layer for the parent documentsdocstore = InMemoryStore()fake_whole_documents = [ ("fake_id_1", Document(page_content="fake whole document 1")), ("fake_id_2", Document(page_content="fake whole document 2")),]docstore.mset(fake_whole_documents) **API Reference:**[InMemoryStore](https://api.python.langchain.com/en/latest/stores/langchain_core.stores.InMemoryStore.html) | [RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html) Next we will add some fake "sub-documents" to our vector store. We can link these sub-documents to the parent documents by populating the `"doc_id"` key in its metadata. docs = [ Document( page_content="A snippet from a larger document discussing cats.", metadata={"doc_id": "fake_id_1"}, ), Document( page_content="A snippet from a larger document discussing discourse.", metadata={"doc_id": "fake_id_1"}, ), Document( page_content="A snippet from a larger document discussing chocolate.", metadata={"doc_id": "fake_id_2"}, ),]vectorstore.add_documents(docs) ['62a85353-41ff-4346-bff7-be6c8ec2ed89', '5d4a0e83-4cc5-40f1-bc73-ed9cbad0ee15', '8c1d9a56-120f-45e4-ba70-a19cd19a38f4'] To propagate the scores, we subclass `MultiVectorRetriever` and override its `_get_relevant_documents` method. Here we will make two changes: 1. We will add similarity scores to the metadata of the corresponding "sub-documents" using the `similarity_search_with_score` method of the underlying vector store as above; 2. We will include a list of these sub-documents in the metadata of the retrieved parent document. This surfaces what snippets of text were identified by the retrieval, together with their corresponding similarity scores. from collections import defaultdictfrom langchain.retrievers import MultiVectorRetrieverfrom langchain_core.callbacks import CallbackManagerForRetrieverRunclass CustomMultiVectorRetriever(MultiVectorRetriever): def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: """Get documents relevant to a query. Args: query: String to find relevant documents for run_manager: The callbacks handler to use Returns: List of relevant documents """ results = self.vectorstore.similarity_search_with_score( query, **self.search_kwargs ) # Map doc_ids to list of sub-documents, adding scores to metadata id_to_doc = defaultdict(list) for doc, score in results: doc_id = doc.metadata.get("doc_id") if doc_id: doc.metadata["score"] = score id_to_doc[doc_id].append(doc) # Fetch documents corresponding to doc_ids, retaining sub_docs in metadata docs = [] for _id, sub_docs in id_to_doc.items(): docstore_docs = self.docstore.mget([_id]) if docstore_docs: if doc := docstore_docs[0]: doc.metadata["sub_docs"] = sub_docs docs.append(doc) return docs **API Reference:**[MultiVectorRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.multi_vector.MultiVectorRetriever.html) | [CallbackManagerForRetrieverRun](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.CallbackManagerForRetrieverRun.html) Invoking this retriever, we can see that it identifies the correct parent document, including the relevant snippet from the sub-document with similarity score. 
retriever = CustomMultiVectorRetriever(vectorstore=vectorstore, docstore=docstore)
retriever.invoke("cat")

[Document(page_content='fake whole document 1', metadata={'sub_docs': [Document(page_content='A snippet from a larger document discussing cats.', metadata={'doc_id': 'fake_id_1', 'score': 0.831276655})]})]
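Since the scores now live in ordinary document metadata, downstream filtering is plain Python. As a hypothetical post-processing step (the threshold is arbitrary and depends on your embedding model and the vector store's scoring convention), you could keep only parent documents whose best sub-document clears a cutoff:

```python
THRESHOLD = 0.8  # arbitrary; tune for your embeddings and similarity metric

results = retriever.invoke("cat")
confident = [
    doc
    for doc in results
    if max(sub.metadata["score"] for sub in doc.metadata["sub_docs"]) >= THRESHOLD
]
```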
https://python.langchain.com/v0.2/docs/how_to/caching_embeddings/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * Caching On this page Caching ======= Embeddings can be stored or temporarily cached to avoid needing to recompute them. Caching embeddings can be done using a `CacheBackedEmbeddings`. The cache backed embedder is a wrapper around an embedder that caches embeddings in a key-value store. The text is hashed and the hash is used as the key in the cache. The main supported way to initialize a `CacheBackedEmbeddings` is `from_bytes_store`. It takes the following parameters: * underlying\_embedder: The embedder to use for embedding. * document\_embedding\_cache: Any [`ByteStore`](/v0.2/docs/integrations/stores/) for caching document embeddings. * batch\_size: (optional, defaults to `None`) The number of documents to embed between store updates. * namespace: (optional, defaults to `""`) The namespace to use for document cache. This namespace is used to avoid collisions with other caches. For example, set it to the name of the embedding model used. * query\_embedding\_cache: (optional, defaults to `None` or not caching) A [`ByteStore`](/v0.2/docs/integrations/stores/) for caching query embeddings, or `True` to use the same store as `document_embedding_cache`. **Attention**: * Be sure to set the `namespace` parameter to avoid collisions of the same text embedded using different embeddings models. * `CacheBackedEmbeddings` does not cache query embeddings by default. To enable query caching, one need to specify a `query_embedding_cache`. from langchain.embeddings import CacheBackedEmbeddings **API Reference:**[CacheBackedEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain.embeddings.cache.CacheBackedEmbeddings.html) Using with a Vector Store[​](#using-with-a-vector-store "Direct link to Using with a Vector Store") --------------------------------------------------------------------------------------------------- First, let's see an example that uses the local file system for storing embeddings and uses FAISS vector store for retrieval. %pip install --upgrade --quiet langchain-openai faiss-cpu from langchain.storage import LocalFileStorefrom langchain_community.document_loaders import TextLoaderfrom langchain_community.vectorstores import FAISSfrom langchain_openai import OpenAIEmbeddingsfrom langchain_text_splitters import CharacterTextSplitterunderlying_embeddings = OpenAIEmbeddings()store = LocalFileStore("./cache/")cached_embedder = CacheBackedEmbeddings.from_bytes_store( underlying_embeddings, store, namespace=underlying_embeddings.model) **API Reference:**[LocalFileStore](https://api.python.langchain.com/en/latest/storage/langchain.storage.file_system.LocalFileStore.html) | [TextLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.text.TextLoader.html) | [FAISS](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) | [CharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.CharacterTextSplitter.html) The cache is empty prior to embedding: list(store.yield_keys()) [] Load the document, split it into chunks, embed each chunk and load it into the vector store. 
raw_documents = TextLoader("state_of_the_union.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(raw_documents)

Create the vector store:

%%time
db = FAISS.from_documents(documents, cached_embedder)

CPU times: user 218 ms, sys: 29.7 ms, total: 248 ms
Wall time: 1.02 s

If we try to create the vector store again, it'll be much faster since it does not need to re-compute any embeddings.

%%time
db2 = FAISS.from_documents(documents, cached_embedder)

CPU times: user 15.7 ms, sys: 2.22 ms, total: 18 ms
Wall time: 17.2 ms

And here are some of the embeddings that got created:

list(store.yield_keys())[:5]

['text-embedding-ada-00217a6727d-8916-54eb-b196-ec9c9d6ca472', 'text-embedding-ada-0025fc0d904-bd80-52da-95c9-441015bfb438', 'text-embedding-ada-002e4ad20ef-dfaa-5916-9459-f90c6d8e8159', 'text-embedding-ada-002ed199159-c1cd-5597-9757-f80498e8f17b', 'text-embedding-ada-0021297d37a-2bc1-5e19-bf13-6c950f075062']

Swapping the `ByteStore`
========================

In order to use a different `ByteStore`, just use it when creating your `CacheBackedEmbeddings`. Below, we create an equivalent cached embeddings object, except using the non-persistent `InMemoryByteStore` instead:

from langchain.embeddings import CacheBackedEmbeddings
from langchain.storage import InMemoryByteStore

store = InMemoryByteStore()
cached_embedder = CacheBackedEmbeddings.from_bytes_store(
    underlying_embeddings, store, namespace=underlying_embeddings.model
)

**API Reference:**[CacheBackedEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain.embeddings.cache.CacheBackedEmbeddings.html) | [InMemoryByteStore](https://api.python.langchain.com/en/latest/stores/langchain_core.stores.InMemoryByteStore.html)
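As noted above, query embeddings are not cached unless you opt in via `query_embedding_cache`. A minimal sketch building on the in-memory example, passing `True` to reuse the same store as the document cache (a separate `ByteStore` works too):

```python
cached_embedder = CacheBackedEmbeddings.from_bytes_store(
    underlying_embeddings,
    store,
    namespace=underlying_embeddings.model,
    query_embedding_cache=True,  # cache embed_query results alongside documents
)
```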
https://python.langchain.com/v0.2/docs/how_to/debugging/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to debug your LLM apps On this page How to debug your LLM apps ========================== Like building any type of software, at some point you'll need to debug when building with LLMs. A model call will fail, or model output will be misformatted, or there will be some nested model calls and it won't be clear where along the way an incorrect output was created. There are three main methods for debugging: * Verbose Mode: This adds print statements for "important" events in your chain. * Debug Mode: This add logging statements for ALL events in your chain. * LangSmith Tracing: This logs events to [LangSmith](https://docs.smith.langchain.com/) to allow for visualization there. Verbose Mode Debug Mode LangSmith Tracing Free ✅ ✅ ✅ UI ❌ ❌ ✅ Persisted ❌ ❌ ✅ See all events ❌ ✅ ✅ See "important" events ✅ ❌ ✅ Runs Locally ✅ ✅ ❌ Tracing[​](#tracing "Direct link to Tracing") --------------------------------------------- Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with [LangSmith](https://smith.langchain.com). After you sign up at the link above, make sure to set your environment variables to start logging traces: export LANGCHAIN_TRACING_V2="true"export LANGCHAIN_API_KEY="..." Or, if in a notebook, you can set them with: import getpassimport osos.environ["LANGCHAIN_TRACING_V2"] = "true"os.environ["LANGCHAIN_API_KEY"] = getpass.getpass() Let's suppose we have an agent, and want to visualize the actions it takes and tool outputs it receives. Without any debugging, here's what we see: * OpenAI * Anthropic * Azure * Google * Cohere * FireworksAI * Groq * MistralAI * TogetherAI pip install -qU langchain-openai import getpassimport osos.environ["OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI(model="gpt-3.5-turbo-0125") pip install -qU langchain-anthropic import getpassimport osos.environ["ANTHROPIC_API_KEY"] = getpass.getpass()from langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model="claude-3-sonnet-20240229") pip install -qU langchain-openai import getpassimport osos.environ["AZURE_OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import AzureChatOpenAIllm = AzureChatOpenAI( azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"], openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],) pip install -qU langchain-google-vertexai import getpassimport osos.environ["GOOGLE_API_KEY"] = getpass.getpass()from langchain_google_vertexai import ChatVertexAIllm = ChatVertexAI(model="gemini-pro") pip install -qU langchain-cohere import getpassimport osos.environ["COHERE_API_KEY"] = getpass.getpass()from langchain_cohere import ChatCoherellm = ChatCohere(model="command-r") pip install -qU langchain-fireworks import getpassimport osos.environ["FIREWORKS_API_KEY"] = getpass.getpass()from langchain_fireworks import ChatFireworksllm = ChatFireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct") pip install -qU langchain-groq import getpassimport osos.environ["GROQ_API_KEY"] = getpass.getpass()from langchain_groq import ChatGroqllm = ChatGroq(model="llama3-8b-8192") pip install -qU langchain-mistralai import getpassimport osos.environ["MISTRAL_API_KEY"] = getpass.getpass()from 
langchain_mistralai import ChatMistralAIllm = ChatMistralAI(model="mistral-large-latest") pip install -qU langchain-openai import getpassimport osos.environ["TOGETHER_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI( base_url="https://api.together.xyz/v1", api_key=os.environ["TOGETHER_API_KEY"], model="mistralai/Mixtral-8x7B-Instruct-v0.1",) from langchain.agents import AgentExecutor, create_tool_calling_agentfrom langchain_community.tools.tavily_search import TavilySearchResultsfrom langchain_core.prompts import ChatPromptTemplatetools = [TavilySearchResults(max_results=1)]prompt = ChatPromptTemplate.from_messages( [ ( "system", "You are a helpful assistant.", ), ("placeholder", "{chat_history}"), ("human", "{input}"), ("placeholder", "{agent_scratchpad}"), ])# Construct the Tools agentagent = create_tool_calling_agent(llm, tools, prompt)# Create an agent executor by passing in the agent and toolsagent_executor = AgentExecutor(agent=agent, tools=tools)agent_executor.invoke( {"input": "Who directed the 2023 film Oppenheimer and what is their age in days?"}) **API Reference:**[AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html) | [create\_tool\_calling\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.tool_calling_agent.base.create_tool_calling_agent.html) | [TavilySearchResults](https://api.python.langchain.com/en/latest/tools/langchain_community.tools.tavily_search.tool.TavilySearchResults.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) {'input': 'Who directed the 2023 film Oppenheimer and what is their age in days?', 'output': 'The 2023 film "Oppenheimer" was directed by Christopher Nolan.\n\nTo calculate Christopher Nolan\'s age in days, we first need his birthdate, which is July 30, 1970. Let\'s calculate his age in days from his birthdate to today\'s date, December 7, 2023.\n\n1. Calculate the total number of days from July 30, 1970, to December 7, 2023.\n2. Nolan was born on July 30, 1970. From July 30, 1970, to July 30, 2023, is 53 years.\n3. From July 30, 2023, to December 7, 2023, is 130 days.\n\nNow, calculate the total days:\n- 53 years = 53 x 365 = 19,345 days\n- Adding leap years from 1970 to 2023: There are 13 leap years (1972, 1976, 1980, 1984, 1988, 1992, 1996, 2000, 2004, 2008, 2012, 2016, 2020). So, add 13 days.\n- Total days from years and leap years = 19,345 + 13 = 19,358 days\n- Add the days from July 30, 2023, to December 7, 2023 = 130 days\n\nTotal age in days = 19,358 + 130 = 19,488 days\n\nChristopher Nolan is 19,488 days old as of December 7, 2023.'} We don't get much output, but since we set up LangSmith we can easily see what happened under the hood: [https://smith.langchain.com/public/a89ff88f-9ddc-4757-a395-3a1b365655bf/r](https://smith.langchain.com/public/a89ff88f-9ddc-4757-a395-3a1b365655bf/r) `set_debug` and `set_verbose`[​](#set_debug-and-set_verbose "Direct link to set_debug-and-set_verbose") ------------------------------------------------------------------------------------------------------- If you're prototyping in Jupyter Notebooks or running Python scripts, it can be helpful to print out the intermediate steps of a chain run. There are a number of ways to enable printing at varying degrees of verbosity. 
Note: These still work even with LangSmith enabled, so you can have both turned on and running at the same time ### `set_verbose(True)`[​](#set_verbosetrue "Direct link to set_verbosetrue") Setting the `verbose` flag will print out inputs and outputs in a slightly more readable format and will skip logging certain raw outputs (like the token usage stats for an LLM call) so that you can focus on application logic. from langchain.globals import set_verboseset_verbose(True)agent_executor = AgentExecutor(agent=agent, tools=tools)agent_executor.invoke( {"input": "Who directed the 2023 film Oppenheimer and what is their age in days?"}) **API Reference:**[set\_verbose](https://api.python.langchain.com/en/latest/globals/langchain.globals.set_verbose.html) > Entering new AgentExecutor chain...Invoking: `tavily_search_results_json` with `{'query': 'director of the 2023 film Oppenheimer'}`[{'url': 'https://m.imdb.com/title/tt15398776/', 'content': 'Oppenheimer: Directed by Christopher Nolan. With Cillian Murphy, Emily Blunt, Robert Downey Jr., Alden Ehrenreich. The story of American scientist J. Robert Oppenheimer and his role in the development of the atomic bomb.'}]Invoking: `tavily_search_results_json` with `{'query': 'birth date of Christopher Nolan'}`[{'url': 'https://m.imdb.com/name/nm0634240/bio/', 'content': 'Christopher Nolan. Writer: Tenet. Best known for his cerebral, often nonlinear, storytelling, acclaimed Academy Award winner writer/director/producer Sir Christopher Nolan CBE was born in London, England. Over the course of more than 25 years of filmmaking, Nolan has gone from low-budget independent films to working on some of the biggest blockbusters ever made and became one of the most ...'}]Invoking: `tavily_search_results_json` with `{'query': 'Christopher Nolan birth date'}`responded: The 2023 film **Oppenheimer** was directed by **Christopher Nolan**.To calculate Christopher Nolan's age in days, I need his exact birth date. Let me find that information for you.[{'url': 'https://m.imdb.com/name/nm0634240/bio/', 'content': 'Christopher Nolan. Writer: Tenet. Best known for his cerebral, often nonlinear, storytelling, acclaimed Academy Award winner writer/director/producer Sir Christopher Nolan CBE was born in London, England. Over the course of more than 25 years of filmmaking, Nolan has gone from low-budget independent films to working on some of the biggest blockbusters ever made and became one of the most ...'}]Invoking: `tavily_search_results_json` with `{'query': 'Christopher Nolan date of birth'}`responded: It appears that I need to refine my search to get the exact birth date of Christopher Nolan. Let me try again to find that specific information.[{'url': 'https://m.imdb.com/name/nm0634240/bio/', 'content': 'Christopher Nolan. Writer: Tenet. Best known for his cerebral, often nonlinear, storytelling, acclaimed Academy Award winner writer/director/producer Sir Christopher Nolan CBE was born in London, England. Over the course of more than 25 years of filmmaking, Nolan has gone from low-budget independent films to working on some of the biggest blockbusters ever made and became one of the most ...'}]I am currently unable to retrieve the exact birth date of Christopher Nolan from the sources available. However, it is widely known that he was born on July 30, 1970. 
Using this date, I can calculate his age in days as of today.Let's calculate:- Christopher Nolan's birth date: July 30, 1970.- Today's date: December 7, 2023.The number of days between these two dates can be calculated as follows:1. From July 30, 1970, to July 30, 2023, is 53 years.2. From July 30, 2023, to December 7, 2023, is 130 days.Calculating the total days for 53 years (considering leap years):- 53 years × 365 days/year = 19,345 days- Adding leap years (1972, 1976, ..., 2020, 2024 - 13 leap years): 13 daysTotal days from birth until July 30, 2023: 19,345 + 13 = 19,358 daysAdding the days from July 30, 2023, to December 7, 2023: 130 daysTotal age in days as of December 7, 2023: 19,358 + 130 = 19,488 days.Therefore, Christopher Nolan is 19,488 days old as of December 7, 2023.> Finished chain. {'input': 'Who directed the 2023 film Oppenheimer and what is their age in days?', 'output': "I am currently unable to retrieve the exact birth date of Christopher Nolan from the sources available. However, it is widely known that he was born on July 30, 1970. Using this date, I can calculate his age in days as of today.\n\nLet's calculate:\n\n- Christopher Nolan's birth date: July 30, 1970.\n- Today's date: December 7, 2023.\n\nThe number of days between these two dates can be calculated as follows:\n\n1. From July 30, 1970, to July 30, 2023, is 53 years.\n2. From July 30, 2023, to December 7, 2023, is 130 days.\n\nCalculating the total days for 53 years (considering leap years):\n- 53 years × 365 days/year = 19,345 days\n- Adding leap years (1972, 1976, ..., 2020, 2024 - 13 leap years): 13 days\n\nTotal days from birth until July 30, 2023: 19,345 + 13 = 19,358 days\nAdding the days from July 30, 2023, to December 7, 2023: 130 days\n\nTotal age in days as of December 7, 2023: 19,358 + 130 = 19,488 days.\n\nTherefore, Christopher Nolan is 19,488 days old as of December 7, 2023."} ### `set_debug(True)`[​](#set_debugtrue "Direct link to set_debugtrue") Setting the global `debug` flag will cause all LangChain components with callback support (chains, models, agents, tools, retrievers) to print the inputs they receive and outputs they generate. This is the most verbose setting and will fully log raw inputs and outputs. 
from langchain.globals import set_debugset_debug(True)set_verbose(False)agent_executor = AgentExecutor(agent=agent, tools=tools)agent_executor.invoke( {"input": "Who directed the 2023 film Oppenheimer and what is their age in days?"}) **API Reference:**[set\_debug](https://api.python.langchain.com/en/latest/globals/langchain.globals.set_debug.html) [chain/start] [1:chain:AgentExecutor] Entering Chain run with input:{ "input": "Who directed the 2023 film Oppenheimer and what is their age in days?"}[chain/start] [1:chain:AgentExecutor > 2:chain:RunnableSequence] Entering Chain run with input:{ "input": ""}[chain/start] [1:chain:AgentExecutor > 2:chain:RunnableSequence > 3:chain:RunnableAssign<agent_scratchpad>] Entering Chain run with input:{ "input": ""}[chain/start] [1:chain:AgentExecutor > 2:chain:RunnableSequence > 3:chain:RunnableAssign<agent_scratchpad> > 4:chain:RunnableParallel<agent_scratchpad>] Entering Chain run with input:{ "input": ""}[chain/start] [1:chain:AgentExecutor > 2:chain:RunnableSequence > 3:chain:RunnableAssign<agent_scratchpad> > 4:chain:RunnableParallel<agent_scratchpad> > 5:chain:RunnableLambda] Entering Chain run with input:{ "input": ""}[chain/end] [1:chain:AgentExecutor > 2:chain:RunnableSequence > 3:chain:RunnableAssign<agent_scratchpad> > 4:chain:RunnableParallel<agent_scratchpad> > 5:chain:RunnableLambda] [1ms] Exiting Chain run with output:{ "output": []}[chain/end] [1:chain:AgentExecutor > 2:chain:RunnableSequence > 3:chain:RunnableAssign<agent_scratchpad> > 4:chain:RunnableParallel<agent_scratchpad>] [2ms] Exiting Chain run with output:{ "agent_scratchpad": []}[chain/end] [1:chain:AgentExecutor > 2:chain:RunnableSequence > 3:chain:RunnableAssign<agent_scratchpad>] [5ms] Exiting Chain run with output:{ "input": "Who directed the 2023 film Oppenheimer and what is their age in days?", "intermediate_steps": [], "agent_scratchpad": []}[chain/start] [1:chain:AgentExecutor > 2:chain:RunnableSequence > 6:prompt:ChatPromptTemplate] Entering Prompt run with input:{ "input": "Who directed the 2023 film Oppenheimer and what is their age in days?", "intermediate_steps": [], "agent_scratchpad": []}[chain/end] [1:chain:AgentExecutor > 2:chain:RunnableSequence > 6:prompt:ChatPromptTemplate] [1ms] Exiting Prompt run with output:[outputs][llm/start] [1:chain:AgentExecutor > 2:chain:RunnableSequence > 7:llm:ChatOpenAI] Entering LLM run with input:{ "prompts": [ "System: You are a helpful assistant.\nHuman: Who directed the 2023 film Oppenheimer and what is their age in days?" 
]}[llm/end] [1:chain:AgentExecutor > 2:chain:RunnableSequence > 7:llm:ChatOpenAI] [3.17s] Exiting LLM run with output:{ "generations": [ [ { "text": "", "generation_info": { "finish_reason": "tool_calls" }, "type": "ChatGenerationChunk", "message": { "lc": 1, "type": "constructor", "id": [ "langchain", "schema", "messages", "AIMessageChunk" ], "kwargs": { "content": "", "example": false, "additional_kwargs": { "tool_calls": [ { "index": 0, "id": "call_fnfq6GjSQED4iF6lo4rxkUup", "function": { "arguments": "{\"query\": \"director of the 2023 film Oppenheimer\"}", "name": "tavily_search_results_json" }, "type": "function" }, { "index": 1, "id": "call_mwhVi6pk49f4OIo5rOWrr4TD", "function": { "arguments": "{\"query\": \"birth date of Christopher Nolan\"}", "name": "tavily_search_results_json" }, "type": "function" } ] }, "tool_call_chunks": [ { "name": "tavily_search_results_json", "args": "{\"query\": \"director of the 2023 film Oppenheimer\"}", "id": "call_fnfq6GjSQED4iF6lo4rxkUup", "index": 0 }, { "name": "tavily_search_results_json", "args": "{\"query\": \"birth date of Christopher Nolan\"}", "id": "call_mwhVi6pk49f4OIo5rOWrr4TD", "index": 1 } ], "response_metadata": { "finish_reason": "tool_calls" }, "id": "run-6e160323-15f9-491d-aadf-b5d337e9e2a1", "tool_calls": [ { "name": "tavily_search_results_json", "args": { "query": "director of the 2023 film Oppenheimer" }, "id": "call_fnfq6GjSQED4iF6lo4rxkUup" }, { "name": "tavily_search_results_json", "args": { "query": "birth date of Christopher Nolan" }, "id": "call_mwhVi6pk49f4OIo5rOWrr4TD" } ], "invalid_tool_calls": [] } } } ] ], "llm_output": null, "run": null}[chain/start] [1:chain:AgentExecutor > 2:chain:RunnableSequence > 8:parser:ToolsAgentOutputParser] Entering Parser run with input:[inputs][chain/end] [1:chain:AgentExecutor > 2:chain:RunnableSequence > 8:parser:ToolsAgentOutputParser] [1ms] Exiting Parser run with output:[outputs][chain/end] [1:chain:AgentExecutor > 2:chain:RunnableSequence] [3.18s] Exiting Chain run with output:[outputs][tool/start] [1:chain:AgentExecutor > 9:tool:tavily_search_results_json] Entering Tool run with input:"{'query': 'director of the 2023 film Oppenheimer'}"``````outputError in ConsoleCallbackHandler.on_tool_end callback: AttributeError("'list' object has no attribute 'strip'")``````output[tool/start] [1:chain:AgentExecutor > 10:tool:tavily_search_results_json] Entering Tool run with input:"{'query': 'birth date of Christopher Nolan'}"``````outputError in ConsoleCallbackHandler.on_tool_end callback: AttributeError("'list' object has no attribute 'strip'")``````output[chain/start] [1:chain:AgentExecutor > 11:chain:RunnableSequence] Entering Chain run with input:{ "input": ""}[chain/start] [1:chain:AgentExecutor > 11:chain:RunnableSequence > 12:chain:RunnableAssign<agent_scratchpad>] Entering Chain run with input:{ "input": ""}[chain/start] [1:chain:AgentExecutor > 11:chain:RunnableSequence > 12:chain:RunnableAssign<agent_scratchpad> > 13:chain:RunnableParallel<agent_scratchpad>] Entering Chain run with input:{ "input": ""}[chain/start] [1:chain:AgentExecutor > 11:chain:RunnableSequence > 12:chain:RunnableAssign<agent_scratchpad> > 13:chain:RunnableParallel<agent_scratchpad> > 14:chain:RunnableLambda] Entering Chain run with input:{ "input": ""}[chain/end] [1:chain:AgentExecutor > 11:chain:RunnableSequence > 12:chain:RunnableAssign<agent_scratchpad> > 13:chain:RunnableParallel<agent_scratchpad> > 14:chain:RunnableLambda] [1ms] Exiting Chain run with output:[outputs][chain/end] [1:chain:AgentExecutor > 
11:chain:RunnableSequence > 12:chain:RunnableAssign<agent_scratchpad> > 13:chain:RunnableParallel<agent_scratchpad>] [4ms] Exiting Chain run with output:[outputs][chain/end] [1:chain:AgentExecutor > 11:chain:RunnableSequence > 12:chain:RunnableAssign<agent_scratchpad>] [8ms] Exiting Chain run with output:[outputs][chain/start] [1:chain:AgentExecutor > 11:chain:RunnableSequence > 15:prompt:ChatPromptTemplate] Entering Prompt run with input:[inputs][chain/end] [1:chain:AgentExecutor > 11:chain:RunnableSequence > 15:prompt:ChatPromptTemplate] [1ms] Exiting Prompt run with output:[outputs][llm/start] [1:chain:AgentExecutor > 11:chain:RunnableSequence > 16:llm:ChatOpenAI] Entering LLM run with input:{ "prompts": [ "System: You are a helpful assistant.\nHuman: Who directed the 2023 film Oppenheimer and what is their age in days?\nAI: \nTool: [{\"url\": \"https://m.imdb.com/title/tt15398776/fullcredits/\", \"content\": \"Oppenheimer (2023) cast and crew credits, including actors, actresses, directors, writers and more. Menu. ... director of photography: behind-the-scenes Jason Gary ... best boy grip ... film loader Luc Poullain ... aerial coordinator\"}]\nTool: [{\"url\": \"https://en.wikipedia.org/wiki/Christopher_Nolan\", \"content\": \"In early 2003, Nolan approached Warner Bros. with the idea of making a new Batman film, based on the character's origin story.[58] Nolan was fascinated by the notion of grounding it in a more realistic world than a comic-book fantasy.[59] He relied heavily on traditional stunts and miniature effects during filming, with minimal use of computer-generated imagery (CGI).[60] Batman Begins (2005), the biggest project Nolan had undertaken to that point,[61] was released to critical acclaim and commercial success.[62][63] Starring Christian Bale as Bruce Wayne / Batman—along with Michael Caine, Gary Oldman, Morgan Freeman and Liam Neeson—Batman Begins revived the franchise.[64][65] Batman Begins was 2005's ninth-highest-grossing film and was praised for its psychological depth and contemporary relevance;[63][66] it is cited as one of the most influential films of the 2000s.[67] Film author Ian Nathan wrote that within five years of his career, Nolan \\\"[went] from unknown to indie darling to gaining creative control over one of the biggest properties in Hollywood, and (perhaps unwittingly) fomenting the genre that would redefine the entire industry\\\".[68]\\nNolan directed, co-wrote and produced The Prestige (2006), an adaptation of the Christopher Priest novel about two rival 19th-century magicians.[69] He directed, wrote and edited the short film Larceny (1996),[19] which was filmed over a weekend in black and white with limited equipment and a small cast and crew.[12][20] Funded by Nolan and shot with the UCL Union Film society's equipment, it appeared at the Cambridge Film Festival in 1996 and is considered one of UCL's best shorts.[21] For unknown reasons, the film has since been removed from public view.[19] Nolan filmed a third short, Doodlebug (1997), about a man seemingly chasing an insect with his shoe, only to discover that it is a miniature of himself.[14][22] Nolan and Thomas first attempted to make a feature in the mid-1990s with Larry Mahoney, which they scrapped.[23] During this period in his career, Nolan had little to no success getting his projects off the ground, facing several rejections; he added, \\\"[T]here's a very limited pool of finance in the UK. 
Philosophy professor David Kyle Johnson wrote that \\\"Inception became a classic almost as soon as it was projected on silver screens\\\", praising its exploration of philosophical ideas, including leap of faith and allegory of the cave.[97] The film grossed over $836 million worldwide.[98] Nominated for eight Academy Awards—including Best Picture and Best Original Screenplay—it won Best Cinematography, Best Sound Mixing, Best Sound Editing and Best Visual Effects.[99] Nolan was nominated for a BAFTA Award and a Golden Globe Award for Best Director, among other accolades.[40]\\nAround the release of The Dark Knight Rises (2012), Nolan's third and final Batman film, Joseph Bevan of the British Film Institute wrote a profile on him: \\\"In the space of just over a decade, Christopher Nolan has shot from promising British indie director to undisputed master of a new brand of intelligent escapism. He further wrote that Nolan's body of work reflect \\\"a heterogeneity of conditions of products\\\" extending from low-budget films to lucrative blockbusters, \\\"a wide range of genres and settings\\\" and \\\"a diversity of styles that trumpet his versatility\\\".[193]\\nDavid Bordwell, a film theorist, wrote that Nolan has been able to blend his \\\"experimental impulses\\\" with the demands of mainstream entertainment, describing his oeuvre as \\\"experiments with cinematic time by means of techniques of subjective viewpoint and crosscutting\\\".[194] Nolan's use of practical, in-camera effects, miniatures and models, as well as shooting on celluloid film, has been highly influential in early 21st century cinema.[195][196] IndieWire wrote in 2019 that, Nolan \\\"kept a viable alternate model of big-budget filmmaking alive\\\", in an era where blockbuster filmmaking has become \\\"a largely computer-generated art form\\\".[196] Initially reluctant to make a sequel, he agreed after Warner Bros. repeatedly insisted.[78] Nolan wanted to expand on the noir quality of the first film by broadening the canvas and taking on \\\"the dynamic of a story of the city, a large crime story ... where you're looking at the police, the justice system, the vigilante, the poor people, the rich people, the criminals\\\".[79] Continuing to minimalise the use of CGI, Nolan employed high-resolution IMAX cameras, making it the first major motion picture to use this technology.[80][81]\"}]" ]}[llm/end] [1:chain:AgentExecutor > 11:chain:RunnableSequence > 16:llm:ChatOpenAI] [20.22s] Exiting LLM run with output:{ "generations": [ [ { "text": "The 2023 film \"Oppenheimer\" was directed by Christopher Nolan.\n\nTo calculate Christopher Nolan's age in days, we first need his birth date, which is July 30, 1970. Let's calculate his age in days from his birth date to today's date, December 7, 2023.\n\n1. Calculate the total number of days from July 30, 1970, to December 7, 2023.\n2. Christopher Nolan was born on July 30, 1970. From July 30, 1970, to July 30, 2023, is 53 years.\n3. From July 30, 2023, to December 7, 2023, is 130 days.\n\nNow, calculate the total days for 53 years:\n- Each year has 365 days, so 53 years × 365 days/year = 19,345 days.\n- Adding the leap years from 1970 to 2023: 1972, 1976, 1980, 1984, 1988, 1992, 1996, 2000, 2004, 2008, 2012, 2016, 2020, and 2024 (up to February). 
This gives us 14 leap years.\n- Total days from leap years: 14 days.\n\nAdding all together:\n- Total days = 19,345 days (from years) + 14 days (from leap years) + 130 days (from July 30, 2023, to December 7, 2023) = 19,489 days.\n\nTherefore, as of December 7, 2023, Christopher Nolan is 19,489 days old.", "generation_info": { "finish_reason": "stop" }, "type": "ChatGenerationChunk", "message": { "lc": 1, "type": "constructor", "id": [ "langchain", "schema", "messages", "AIMessageChunk" ], "kwargs": { "content": "The 2023 film \"Oppenheimer\" was directed by Christopher Nolan.\n\nTo calculate Christopher Nolan's age in days, we first need his birth date, which is July 30, 1970. Let's calculate his age in days from his birth date to today's date, December 7, 2023.\n\n1. Calculate the total number of days from July 30, 1970, to December 7, 2023.\n2. Christopher Nolan was born on July 30, 1970. From July 30, 1970, to July 30, 2023, is 53 years.\n3. From July 30, 2023, to December 7, 2023, is 130 days.\n\nNow, calculate the total days for 53 years:\n- Each year has 365 days, so 53 years × 365 days/year = 19,345 days.\n- Adding the leap years from 1970 to 2023: 1972, 1976, 1980, 1984, 1988, 1992, 1996, 2000, 2004, 2008, 2012, 2016, 2020, and 2024 (up to February). This gives us 14 leap years.\n- Total days from leap years: 14 days.\n\nAdding all together:\n- Total days = 19,345 days (from years) + 14 days (from leap years) + 130 days (from July 30, 2023, to December 7, 2023) = 19,489 days.\n\nTherefore, as of December 7, 2023, Christopher Nolan is 19,489 days old.", "example": false, "additional_kwargs": {}, "tool_call_chunks": [], "response_metadata": { "finish_reason": "stop" }, "id": "run-1c08a44f-db70-4836-935b-417caaf422a5", "tool_calls": [], "invalid_tool_calls": [] } } } ] ], "llm_output": null, "run": null}[chain/start] [1:chain:AgentExecutor > 11:chain:RunnableSequence > 17:parser:ToolsAgentOutputParser] Entering Parser run with input:[inputs][chain/end] [1:chain:AgentExecutor > 11:chain:RunnableSequence > 17:parser:ToolsAgentOutputParser] [2ms] Exiting Parser run with output:[outputs][chain/end] [1:chain:AgentExecutor > 11:chain:RunnableSequence] [20.27s] Exiting Chain run with output:[outputs][chain/end] [1:chain:AgentExecutor] [26.37s] Exiting Chain run with output:{ "output": "The 2023 film \"Oppenheimer\" was directed by Christopher Nolan.\n\nTo calculate Christopher Nolan's age in days, we first need his birth date, which is July 30, 1970. Let's calculate his age in days from his birth date to today's date, December 7, 2023.\n\n1. Calculate the total number of days from July 30, 1970, to December 7, 2023.\n2. Christopher Nolan was born on July 30, 1970. From July 30, 1970, to July 30, 2023, is 53 years.\n3. From July 30, 2023, to December 7, 2023, is 130 days.\n\nNow, calculate the total days for 53 years:\n- Each year has 365 days, so 53 years × 365 days/year = 19,345 days.\n- Adding the leap years from 1970 to 2023: 1972, 1976, 1980, 1984, 1988, 1992, 1996, 2000, 2004, 2008, 2012, 2016, 2020, and 2024 (up to February). 
This gives us 14 leap years.\n- Total days from leap years: 14 days.\n\nAdding all together:\n- Total days = 19,345 days (from years) + 14 days (from leap years) + 130 days (from July 30, 2023, to December 7, 2023) = 19,489 days.\n\nTherefore, as of December 7, 2023, Christopher Nolan is 19,489 days old."} {'input': 'Who directed the 2023 film Oppenheimer and what is their age in days?', 'output': 'The 2023 film "Oppenheimer" was directed by Christopher Nolan.\n\nTo calculate Christopher Nolan\'s age in days, we first need his birth date, which is July 30, 1970. Let\'s calculate his age in days from his birth date to today\'s date, December 7, 2023.\n\n1. Calculate the total number of days from July 30, 1970, to December 7, 2023.\n2. Christopher Nolan was born on July 30, 1970. From July 30, 1970, to July 30, 2023, is 53 years.\n3. From July 30, 2023, to December 7, 2023, is 130 days.\n\nNow, calculate the total days for 53 years:\n- Each year has 365 days, so 53 years × 365 days/year = 19,345 days.\n- Adding the leap years from 1970 to 2023: 1972, 1976, 1980, 1984, 1988, 1992, 1996, 2000, 2004, 2008, 2012, 2016, 2020, and 2024 (up to February). This gives us 14 leap years.\n- Total days from leap years: 14 days.\n\nAdding all together:\n- Total days = 19,345 days (from years) + 14 days (from leap years) + 130 days (from July 30, 2023, to December 7, 2023) = 19,489 days.\n\nTherefore, as of December 7, 2023, Christopher Nolan is 19,489 days old.'} [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/debugging.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to create custom tools ](/v0.2/docs/how_to/custom_tools/)[ Next How to load CSVs ](/v0.2/docs/how_to/document_loader_csv/) * [Tracing](#tracing) * [`set_debug` and `set_verbose`](#set_debug-and-set_verbose) * [`set_verbose(True)`](#set_verbosetrue) * [`set_debug(True)`](#set_debugtrue)
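One practical footnote not covered in the page above: both flags are global and stay set for the rest of the process, so it is worth switching them off once you are done inspecting a run. A small sketch:

```python
from langchain.globals import set_debug, set_verbose

# Reset global logging state after a debugging session.
set_debug(False)
set_verbose(False)
```

If you only want extra output from a single component, many chains and agent executors also accept a per-instance `verbose=True` argument (e.g. `AgentExecutor(agent=agent, tools=tools, verbose=True)`), which avoids flipping global state.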
https://python.langchain.com/v0.2/docs/how_to/custom_tools/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to create custom tools On this page How to create custom tools ========================== When constructing an agent, you will need to provide it with a list of `Tool`s that it can use. Besides the actual function that is called, the Tool consists of several components: Attribute Type Description name str Must be unique within a set of tools provided to an LLM or agent. description str Describes what the tool does. Used as context by the LLM or agent. args\_schema Pydantic BaseModel Optional but recommended, can be used to provide more information (e.g., few-shot examples) or validation for expected parameters return\_direct boolean Only relevant for agents. When True, after invoking the given tool, the agent will stop and return the result direcly to the user. LangChain provides 3 ways to create tools: 1. Using [@tool decorator](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html#langchain_core.tools.tool) -- the simplest way to define a custom tool. 2. Using [StructuredTool.from\_function](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html#langchain_core.tools.StructuredTool.from_function) class method -- this is similar to the `@tool` decorator, but allows more configuration and specification of both sync and async implementations. 3. By sub-classing from [BaseTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html) -- This is the most flexible method, it provides the largest degree of control, at the expense of more effort and code. The `@tool` or the `StructuredTool.from_function` class method should be sufficient for most use cases. tip Models will perform better if the tools have well chosen names, descriptions and JSON schemas. @tool decorator[​](#tool-decorator "Direct link to @tool decorator") -------------------------------------------------------------------- This `@tool` decorator is the simplest way to define a custom tool. The decorator uses the function name as the tool name by default, but this can be overridden by passing a string as the first argument. Additionally, the decorator will use the function's docstring as the tool's description - so a docstring MUST be provided. from langchain_core.tools import tool@tooldef multiply(a: int, b: int) -> int: """Multiply two numbers.""" return a * b# Let's inspect some of the attributes associated with the tool.print(multiply.name)print(multiply.description)print(multiply.args) **API Reference:**[tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) multiplymultiply(a: int, b: int) -> int - Multiply two numbers.{'a': {'title': 'A', 'type': 'integer'}, 'b': {'title': 'B', 'type': 'integer'}} Or create an **async** implementation, like this: from langchain_core.tools import tool@toolasync def amultiply(a: int, b: int) -> int: """Multiply two numbers.""" return a * b **API Reference:**[tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) You can also customize the tool name and JSON args by passing them into the tool decorator. 
from langchain.pydantic_v1 import BaseModel, Fieldclass CalculatorInput(BaseModel): a: int = Field(description="first number") b: int = Field(description="second number")@tool("multiplication-tool", args_schema=CalculatorInput, return_direct=True)def multiply(a: int, b: int) -> int: """Multiply two numbers.""" return a * b# Let's inspect some of the attributes associated with the tool.print(multiply.name)print(multiply.description)print(multiply.args)print(multiply.return_direct) multiplication-toolmultiplication-tool(a: int, b: int) -> int - Multiply two numbers.{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}True StructuredTool[​](#structuredtool "Direct link to StructuredTool") ------------------------------------------------------------------ The `StrurcturedTool.from_function` class method provides a bit more configurability than the `@tool` decorator, without requiring much additional code. from langchain_core.tools import StructuredTooldef multiply(a: int, b: int) -> int: """Multiply two numbers.""" return a * basync def amultiply(a: int, b: int) -> int: """Multiply two numbers.""" return a * bcalculator = StructuredTool.from_function(func=multiply, coroutine=amultiply)print(calculator.invoke({"a": 2, "b": 3}))print(await calculator.ainvoke({"a": 2, "b": 5})) **API Reference:**[StructuredTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html) 610 To configure it: class CalculatorInput(BaseModel): a: int = Field(description="first number") b: int = Field(description="second number")def multiply(a: int, b: int) -> int: """Multiply two numbers.""" return a * bcalculator = StructuredTool.from_function( func=multiply, name="Calculator", description="multiply numbers", args_schema=CalculatorInput, return_direct=True, # coroutine= ... <- you can specify an async method if desired as well)print(calculator.invoke({"a": 2, "b": 3}))print(calculator.name)print(calculator.description)print(calculator.args) 6CalculatorCalculator(a: int, b: int) -> int - multiply numbers{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}} Subclass BaseTool[​](#subclass-basetool "Direct link to Subclass BaseTool") --------------------------------------------------------------------------- You can define a custom tool by sub-classing from `BaseTool`. This provides maximal control over the tool definition, but requires writing more code. from typing import Optional, Typefrom langchain.pydantic_v1 import BaseModelfrom langchain_core.callbacks import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun,)from langchain_core.tools import BaseToolclass CalculatorInput(BaseModel): a: int = Field(description="first number") b: int = Field(description="second number")class CustomCalculatorTool(BaseTool): name = "Calculator" description = "useful for when you need to answer questions about math" args_schema: Type[BaseModel] = CalculatorInput return_direct: bool = True def _run( self, a: int, b: int, run_manager: Optional[CallbackManagerForToolRun] = None ) -> str: """Use the tool.""" return a * b async def _arun( self, a: int, b: int, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool asynchronously.""" # If the calculation is cheap, you can just delegate to the sync implementation # as shown below. 
# If the sync calculation is expensive, you should delete the entire _arun method. # LangChain will automatically provide a better implementation that will # kick off the task in a thread to make sure it doesn't block other async code. return self._run(a, b, run_manager=run_manager.get_sync()) **API Reference:**[AsyncCallbackManagerForToolRun](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.AsyncCallbackManagerForToolRun.html) | [CallbackManagerForToolRun](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.CallbackManagerForToolRun.html) | [BaseTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html) multiply = CustomCalculatorTool()print(multiply.name)print(multiply.description)print(multiply.args)print(multiply.return_direct)print(multiply.invoke({"a": 2, "b": 3}))print(await multiply.ainvoke({"a": 2, "b": 3})) Calculatoruseful for when you need to answer questions about math{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}True66 How to create async tools[​](#how-to-create-async-tools "Direct link to How to create async tools") --------------------------------------------------------------------------------------------------- LangChain Tools implement the [Runnable interface 🏃](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html). All Runnables expose the `invoke` and `ainvoke` methods (as well as other methods like `batch`, `abatch`, `astream` etc). So even if you only provide an `sync` implementation of a tool, you could still use the `ainvoke` interface, but there are some important things to know: * LangChain's by default provides an async implementation that assumes that the function is expensive to compute, so it'll delegate execution to another thread. * If you're working in an async codebase, you should create async tools rather than sync tools, to avoid incuring a small overhead due to that thread. * If you need both sync and async implementations, use `StructuredTool.from_function` or sub-class from `BaseTool`. * If implementing both sync and async, and the sync code is fast to run, override the default LangChain async implementation and simply call the sync code. * You CANNOT and SHOULD NOT use the sync `invoke` with an `async` tool. from langchain_core.tools import StructuredTooldef multiply(a: int, b: int) -> int: """Multiply two numbers.""" return a * bcalculator = StructuredTool.from_function(func=multiply)print(calculator.invoke({"a": 2, "b": 3}))print( await calculator.ainvoke({"a": 2, "b": 5})) # Uses default LangChain async implementation incurs small overhead **API Reference:**[StructuredTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html) 610 from langchain_core.tools import StructuredTooldef multiply(a: int, b: int) -> int: """Multiply two numbers.""" return a * basync def amultiply(a: int, b: int) -> int: """Multiply two numbers.""" return a * bcalculator = StructuredTool.from_function(func=multiply, coroutine=amultiply)print(calculator.invoke({"a": 2, "b": 3}))print( await calculator.ainvoke({"a": 2, "b": 5})) # Uses use provided amultiply without additional overhead **API Reference:**[StructuredTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html) 610 You should not and cannot use `.invoke` when providing only an async definition. 
@toolasync def multiply(a: int, b: int) -> int: """Multiply two numbers.""" return a * btry: multiply.invoke({"a": 2, "b": 3})except NotImplementedError: print("Raised not implemented error. You should not be doing this.") Raised not implemented error. You should not be doing this. Handling Tool Errors[​](#handling-tool-errors "Direct link to Handling Tool Errors") ------------------------------------------------------------------------------------ If you're using tools with agents, you will likely need an error handling strategy, so the agent can recover from the error and continue execution. A simple strategy is to throw a `ToolException` from inside the tool and specify an error handler using `handle_tool_error`. When the error handler is specified, the exception will be caught and the error handler will decide which output to return from the tool. You can set `handle_tool_error` to `True`, a string value, or a function. If it's a function, the function should take a `ToolException` as a parameter and return a value. Please note that only raising a `ToolException` won't be effective. You need to first set the `handle_tool_error` of the tool because its default value is `False`. from langchain_core.tools import ToolExceptiondef get_weather(city: str) -> int: """Get weather for the given city.""" raise ToolException(f"Error: There is no city by the name of {city}.") **API Reference:**[ToolException](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.ToolException.html) Here's an example with the default `handle_tool_error=True` behavior. get_weather_tool = StructuredTool.from_function( func=get_weather, handle_tool_error=True,)get_weather_tool.invoke({"city": "foobar"}) 'Error: There is no city by the name of foobar.' We can set `handle_tool_error` to a string that will always be returned. get_weather_tool = StructuredTool.from_function( func=get_weather, handle_tool_error="There is no such city, but it's probably above 0K there!",)get_weather_tool.invoke({"city": "foobar"}) "There is no such city, but it's probably above 0K there!" Handling the error using a function: def _handle_error(error: ToolException) -> str: return f"The following errors occurred during tool execution: `{error.args[0]}`"get_weather_tool = StructuredTool.from_function( func=get_weather, handle_tool_error=_handle_error,)get_weather_tool.invoke({"city": "foobar"}) 'The following errors occurred during tool execution: `Error: There is no city by the name of foobar.`' [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/custom_tools.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous Custom Retriever ](/v0.2/docs/how_to/custom_retriever/)[ Next How to debug your LLM apps ](/v0.2/docs/how_to/debugging/) * [@tool decorator](#tool-decorator) * [StructuredTool](#structuredtool) * [Subclass BaseTool](#subclass-basetool) * [How to create async tools](#how-to-create-async-tools) * [Handling Tool Errors](#handling-tool-errors)
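As a closing addendum to this guide, here is a minimal sketch (not from the original examples) that combines the pieces shown above: `StructuredTool.from_function` with a sync function, an async coroutine, a custom `args_schema`, and a `handle_tool_error` function. The tool name, schema, and error message are illustrative assumptions.

```python
from langchain.pydantic_v1 import BaseModel, Field
from langchain_core.tools import StructuredTool, ToolException


class CityInput(BaseModel):
    city: str = Field(description="name of the city to look up")


def get_weather(city: str) -> str:
    """Get weather for the given city."""
    if city == "foobar":
        raise ToolException(f"Error: There is no city by the name of {city}.")
    return f"It is sunny in {city}."


async def aget_weather(city: str) -> str:
    """Get weather for the given city (async)."""
    return get_weather(city)


def _handle_error(error: ToolException) -> str:
    # Turn the ToolException into a string the agent can recover from.
    return f"Tool failed: `{error.args[0]}`"


weather_tool = StructuredTool.from_function(
    func=get_weather,
    coroutine=aget_weather,
    name="Weather",  # illustrative name
    description="look up the weather for a city",
    args_schema=CityInput,
    handle_tool_error=_handle_error,
)

print(weather_tool.invoke({"city": "Paris"}))   # normal result
print(weather_tool.invoke({"city": "foobar"}))  # error handler's string
```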
https://python.langchain.com/v0.2/docs/how_to/callbacks_async/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to use callbacks in async environments On this page How to use callbacks in async environments ========================================== Prerequisites This guide assumes familiarity with the following concepts: * [Callbacks](/v0.2/docs/concepts/#callbacks) * [Custom callback handlers](/v0.2/docs/how_to/custom_callbacks/) If you are planning to use the async APIs, it is recommended to use and extend [`AsyncCallbackHandler`](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) to avoid blocking the event. danger If you use a sync `CallbackHandler` while using an async method to run your LLM / Chain / Tool / Agent, it will still work. However, under the hood, it will be called with [`run_in_executor`](https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.run_in_executor) which can cause issues if your `CallbackHandler` is not thread-safe. danger If you're on `python<=3.10`, you need to remember to propagate `config` or `callbacks` when invoking other `runnable` from within a `RunnableLambda`, `RunnableGenerator` or `@tool`. If you do not do this, the callbacks will not be propagated to the child runnables being invoked. import asynciofrom typing import Any, Dict, Listfrom langchain_anthropic import ChatAnthropicfrom langchain_core.callbacks import AsyncCallbackHandler, BaseCallbackHandlerfrom langchain_core.messages import HumanMessagefrom langchain_core.outputs import LLMResultclass MyCustomSyncHandler(BaseCallbackHandler): def on_llm_new_token(self, token: str, **kwargs) -> None: print(f"Sync handler being called in a `thread_pool_executor`: token: {token}")class MyCustomAsyncHandler(AsyncCallbackHandler): """Async callback handler that can be used to handle callbacks from langchain.""" async def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: """Run when chain starts running.""" print("zzzz....") await asyncio.sleep(0.3) class_name = serialized["name"] print("Hi! I just woke up. Your llm is starting") async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when chain ends running.""" print("zzzz....") await asyncio.sleep(0.3) print("Hi! I just woke up. Your llm is ending")# To enable streaming, we pass in `streaming=True` to the ChatModel constructor# Additionally, we pass in a list with our custom handlerchat = ChatAnthropic( model="claude-3-sonnet-20240229", max_tokens=25, streaming=True, callbacks=[MyCustomSyncHandler(), MyCustomAsyncHandler()],)await chat.agenerate([[HumanMessage(content="Tell me a joke")]]) **API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) | [AsyncCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) | [BaseCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [LLMResult](https://api.python.langchain.com/en/latest/outputs/langchain_core.outputs.llm_result.LLMResult.html) zzzz....Hi! I just woke up. 
Your llm is startingSync handler being called in a `thread_pool_executor`: token: HereSync handler being called in a `thread_pool_executor`: token: 'sSync handler being called in a `thread_pool_executor`: token: aSync handler being called in a `thread_pool_executor`: token: littleSync handler being called in a `thread_pool_executor`: token: jokeSync handler being called in a `thread_pool_executor`: token: forSync handler being called in a `thread_pool_executor`: token: youSync handler being called in a `thread_pool_executor`: token: :Sync handler being called in a `thread_pool_executor`: token: WhySync handler being called in a `thread_pool_executor`: token: canSync handler being called in a `thread_pool_executor`: token: 'tSync handler being called in a `thread_pool_executor`: token: aSync handler being called in a `thread_pool_executor`: token: bicycleSync handler being called in a `thread_pool_executor`: token: stanSync handler being called in a `thread_pool_executor`: token: d upSync handler being called in a `thread_pool_executor`: token: bySync handler being called in a `thread_pool_executor`: token: itselfSync handler being called in a `thread_pool_executor`: token: ?Sync handler being called in a `thread_pool_executor`: token: BecauseSync handler being called in a `thread_pool_executor`: token: itSync handler being called in a `thread_pool_executor`: token: 'sSync handler being called in a `thread_pool_executor`: token: twoSync handler being called in a `thread_pool_executor`: token: -Sync handler being called in a `thread_pool_executor`: token: tirezzzz....Hi! I just woke up. Your llm is ending LLMResult(generations=[[ChatGeneration(text="Here's a little joke for you:\n\nWhy can't a bicycle stand up by itself? Because it's two-tire", message=AIMessage(content="Here's a little joke for you:\n\nWhy can't a bicycle stand up by itself? Because it's two-tire", id='run-8afc89e8-02c0-4522-8480-d96977240bd4-0'))]], llm_output={}, run=[RunInfo(run_id=UUID('8afc89e8-02c0-4522-8480-d96977240bd4'))]) Next steps[​](#next-steps "Direct link to Next steps") ------------------------------------------------------ You've now learned how to create your own custom callback handlers. Next, check out the other how-to guides in this section, such as [how to attach callbacks to a runnable](/v0.2/docs/how_to/callbacks_attach/). [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/callbacks_async.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous Caching ](/v0.2/docs/how_to/caching_embeddings/)[ Next How to attach callbacks to a runnable ](/v0.2/docs/how_to/callbacks_attach/) * [Next steps](#next-steps)
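As a small extension of the example above, here is a hedged sketch of an `AsyncCallbackHandler` that collects streamed tokens instead of printing them. The `TokenCollector` class is illustrative, and, like the example above, it assumes an Anthropic API key is configured and runs in an async context (e.g., a notebook with top-level `await`).

```python
from typing import Any, List

from langchain_anthropic import ChatAnthropic
from langchain_core.callbacks import AsyncCallbackHandler
from langchain_core.messages import HumanMessage


class TokenCollector(AsyncCallbackHandler):
    """Collects streamed tokens without blocking the event loop."""

    def __init__(self) -> None:
        self.tokens: List[str] = []

    async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        self.tokens.append(token)


collector = TokenCollector()
chat = ChatAnthropic(
    model="claude-3-sonnet-20240229",
    max_tokens=25,
    streaming=True,
    callbacks=[collector],
)

await chat.agenerate([[HumanMessage(content="Tell me a joke")]])
# All streamed tokens are now available on the handler instance.
print("".join(collector.tokens))
```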
https://python.langchain.com/v0.2/docs/how_to/document_loader_csv/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to load CSVs On this page How to load CSVs ================ A [comma-separated values (CSV)](https://en.wikipedia.org/wiki/Comma-separated_values) file is a delimited text file that uses a comma to separate values. Each line of the file is a data record. Each record consists of one or more fields, separated by commas. LangChain implements a [CSV Loader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.csv_loader.CSVLoader.html) that will load CSV files into a sequence of [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects. Each row of the CSV file is translated to one document. from langchain_community.document_loaders.csv_loader import CSVLoaderfile_path = ( "../../../docs/integrations/document_loaders/example_data/mlb_teams_2012.csv")loader = CSVLoader(file_path=file_path)data = loader.load()for record in data[:2]: print(record) **API Reference:**[CSVLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.csv_loader.CSVLoader.html) page_content='Team: Nationals\n"Payroll (millions)": 81.34\n"Wins": 98' metadata={'source': '../../../docs/integrations/document_loaders/example_data/mlb_teams_2012.csv', 'row': 0}page_content='Team: Reds\n"Payroll (millions)": 82.20\n"Wins": 97' metadata={'source': '../../../docs/integrations/document_loaders/example_data/mlb_teams_2012.csv', 'row': 1} Customizing the CSV parsing and loading[​](#customizing-the-csv-parsing-and-loading "Direct link to Customizing the CSV parsing and loading") --------------------------------------------------------------------------------------------------------------------------------------------- `CSVLoader` will accept a `csv_args` kwarg that supports customization of arguments passed to Python's `csv.DictReader`. See the [csv module](https://docs.python.org/3/library/csv.html) documentation for more information of what csv args are supported. loader = CSVLoader( file_path=file_path, csv_args={ "delimiter": ",", "quotechar": '"', "fieldnames": ["MLB Team", "Payroll in millions", "Wins"], },)data = loader.load()for record in data[:2]: print(record) page_content='MLB Team: Team\nPayroll in millions: "Payroll (millions)"\nWins: "Wins"' metadata={'source': '../../../docs/integrations/document_loaders/example_data/mlb_teams_2012.csv', 'row': 0}page_content='MLB Team: Nationals\nPayroll in millions: 81.34\nWins: 98' metadata={'source': '../../../docs/integrations/document_loaders/example_data/mlb_teams_2012.csv', 'row': 1} Specify a column to identify the document source[​](#specify-a-column-to-identify-the-document-source "Direct link to Specify a column to identify the document source") ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ The `"source"` key on [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) metadata can be set using a column of the CSV. Use the `source_column` argument to specify a source for the document created from each row. Otherwise `file_path` will be used as the source for all documents created from the CSV file. This is useful when using documents loaded from CSV files for chains that answer questions using sources. 
loader = CSVLoader(file_path=file_path, source_column="Team")data = loader.load()for record in data[:2]: print(record) page_content='Team: Nationals\n"Payroll (millions)": 81.34\n"Wins": 98' metadata={'source': 'Nationals', 'row': 0}page_content='Team: Reds\n"Payroll (millions)": 82.20\n"Wins": 97' metadata={'source': 'Reds', 'row': 1} Load from a string[​](#load-from-a-string "Direct link to Load from a string") ------------------------------------------------------------------------------ Python's `tempfile` can be used when working with CSV strings directly. import tempfilefrom io import StringIOstring_data = """"Team", "Payroll (millions)", "Wins""Nationals", 81.34, 98"Reds", 82.20, 97"Yankees", 197.96, 95"Giants", 117.62, 94""".strip()with tempfile.NamedTemporaryFile(delete=False, mode="w+") as temp_file: temp_file.write(string_data) temp_file_path = temp_file.nameloader = CSVLoader(file_path=temp_file_path)loader.load()for record in data[:2]: print(record) page_content='Team: Nationals\n"Payroll (millions)": 81.34\n"Wins": 98' metadata={'source': 'Nationals', 'row': 0}page_content='Team: Reds\n"Payroll (millions)": 82.20\n"Wins": 97' metadata={'source': 'Reds', 'row': 1} [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/document_loader_csv.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to debug your LLM apps ](/v0.2/docs/how_to/debugging/)[ Next How to load documents from a directory ](/v0.2/docs/how_to/document_loader_directory/) * [Customizing the CSV parsing and loading](#customizing-the-csv-parsing-and-loading) * [Specify a column to identify the document source](#specify-a-column-to-identify-the-document-source) * [Load from a string](#load-from-a-string)
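The options shown on this page can be combined. Below is a minimal sketch, reusing the file and column names from the examples above, that applies custom `csv_args` and takes each document's source from the renamed `"MLB Team"` column instead of the file path (note that with custom `fieldnames`, the original header row is emitted as a regular record, as shown earlier).

```python
from langchain_community.document_loaders.csv_loader import CSVLoader

file_path = "../../../docs/integrations/document_loaders/example_data/mlb_teams_2012.csv"

loader = CSVLoader(
    file_path=file_path,
    csv_args={
        "delimiter": ",",
        "quotechar": '"',
        "fieldnames": ["MLB Team", "Payroll in millions", "Wins"],
    },
    # The source metadata now comes from the renamed team column.
    source_column="MLB Team",
)

for record in loader.load()[:2]:
    print(record.page_content)
    print(record.metadata)
```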
https://python.langchain.com/v0.2/docs/how_to/document_loader_directory/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to load documents from a directory On this page How to load documents from a directory ====================================== LangChain's [DirectoryLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.directory.DirectoryLoader.html) implements functionality for reading files from disk into LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects. Here we demonstrate: * How to load from a filesystem, including use of wildcard patterns; * How to use multithreading for file I/O; * How to use custom loader classes to parse specific file types (e.g., code); * How to handle errors, such as those due to decoding. from langchain_community.document_loaders import DirectoryLoader **API Reference:**[DirectoryLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.directory.DirectoryLoader.html) `DirectoryLoader` accepts a `loader_cls` kwarg, which defaults to [UnstructuredLoader](/v0.2/docs/integrations/document_loaders/unstructured_file/). [Unstructured](https://unstructured-io.github.io/unstructured/) supports parsing for a number of formats, such as PDF and HTML. Here we use it to read in a markdown (.md) file. We can use the `glob` parameter to control which files to load. Note that here it doesn't load the `.rst` file or the `.html` files. loader = DirectoryLoader("../", glob="**/*.md")docs = loader.load()len(docs) 20 print(docs[0].page_content[:100]) SecurityLangChain has a large ecosystem of integrations with various external resources like local Show a progress bar[​](#show-a-progress-bar "Direct link to Show a progress bar") --------------------------------------------------------------------------------- By default a progress bar will not be shown. To show a progress bar, install the `tqdm` library (e.g. `pip install tqdm`), and set the `show_progress` parameter to `True`. loader = DirectoryLoader("../", glob="**/*.md", show_progress=True)docs = loader.load() 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 20/20 [00:00<00:00, 54.56it/s] Use multithreading[​](#use-multithreading "Direct link to Use multithreading") ------------------------------------------------------------------------------ By default the loading happens in one thread. In order to utilize several threads set the `use_multithreading` flag to true. loader = DirectoryLoader("../", glob="**/*.md", use_multithreading=True)docs = loader.load() Change loader class[​](#change-loader-class "Direct link to Change loader class") --------------------------------------------------------------------------------- By default this uses the `UnstructuredLoader` class. To customize the loader, specify the loader class in the `loader_cls` kwarg. 
Below we show an example using [TextLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.text.TextLoader.html): from langchain_community.document_loaders import TextLoaderloader = DirectoryLoader("../", glob="**/*.md", loader_cls=TextLoader)docs = loader.load() **API Reference:**[TextLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.text.TextLoader.html) print(docs[0].page_content[:100]) # SecurityLangChain has a large ecosystem of integrations with various external resources like loc Notice that while the `UnstructuredLoader` parses Markdown headers, `TextLoader` does not. If you need to load Python source code files, use the `PythonLoader`: from langchain_community.document_loaders import PythonLoaderloader = DirectoryLoader("../../../../../", glob="**/*.py", loader_cls=PythonLoader) **API Reference:**[PythonLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.python.PythonLoader.html) Auto-detect file encodings with TextLoader[​](#auto-detect-file-encodings-with-textloader "Direct link to Auto-detect file encodings with TextLoader") ------------------------------------------------------------------------------------------------------------------------------------------------------ `DirectoryLoader` can help manage errors due to variations in file encodings. Below we will attempt to load in a collection of files, one of which includes non-UTF8 encodings. path = "../../../../libs/langchain/tests/unit_tests/examples/"loader = DirectoryLoader(path, glob="**/*.txt", loader_cls=TextLoader) ### A. Default Behavior[​](#a-default-behavior "Direct link to A. Default Behavior") By default we raise an error: loader.load() Error loading file ../../../../libs/langchain/tests/unit_tests/examples/example-non-utf8.txt ---------------------------------------------------------------------------``````outputUnicodeDecodeError Traceback (most recent call last)``````outputFile ~/repos/langchain/libs/community/langchain_community/document_loaders/text.py:43, in TextLoader.lazy_load(self) 42 with open(self.file_path, encoding=self.encoding) as f:---> 43 text = f.read() 44 except UnicodeDecodeError as e:``````outputFile ~/.pyenv/versions/3.10.4/lib/python3.10/codecs.py:322, in BufferedIncrementalDecoder.decode(self, input, final) 321 data = self.buffer + input--> 322 (result, consumed) = self._buffer_decode(data, self.errors, final) 323 # keep undecoded input until the next call``````outputUnicodeDecodeError: 'utf-8' codec can't decode byte 0xca in position 0: invalid continuation byte``````outputThe above exception was the direct cause of the following exception:``````outputRuntimeError Traceback (most recent call last)``````outputCell In[10], line 1----> 1 loader.load()``````outputFile ~/repos/langchain/libs/community/langchain_community/document_loaders/directory.py:117, in DirectoryLoader.load(self) 115 def load(self) -> List[Document]: 116 """Load documents."""--> 117 return list(self.lazy_load())``````outputFile ~/repos/langchain/libs/community/langchain_community/document_loaders/directory.py:182, in DirectoryLoader.lazy_load(self) 180 else: 181 for i in items:--> 182 yield from self._lazy_load_file(i, p, pbar) 184 if pbar: 185 pbar.close()``````outputFile ~/repos/langchain/libs/community/langchain_community/document_loaders/directory.py:220, in DirectoryLoader._lazy_load_file(self, item, path, pbar) 218 else: 219 logger.error(f"Error loading file 
{str(item)}")--> 220 raise e 221 finally: 222 if pbar:``````outputFile ~/repos/langchain/libs/community/langchain_community/document_loaders/directory.py:210, in DirectoryLoader._lazy_load_file(self, item, path, pbar) 208 loader = self.loader_cls(str(item), **self.loader_kwargs) 209 try:--> 210 for subdoc in loader.lazy_load(): 211 yield subdoc 212 except NotImplementedError:``````outputFile ~/repos/langchain/libs/community/langchain_community/document_loaders/text.py:56, in TextLoader.lazy_load(self) 54 continue 55 else:---> 56 raise RuntimeError(f"Error loading {self.file_path}") from e 57 except Exception as e: 58 raise RuntimeError(f"Error loading {self.file_path}") from e``````outputRuntimeError: Error loading ../../../../libs/langchain/tests/unit_tests/examples/example-non-utf8.txt The file `example-non-utf8.txt` uses a different encoding, so the `load()` function fails with a helpful message indicating which file failed decoding. With the default behavior of `TextLoader` any failure to load any of the documents will fail the whole loading process and no documents are loaded. ### B. Silent fail[​](#b-silent-fail "Direct link to B. Silent fail") We can pass the parameter `silent_errors` to the `DirectoryLoader` to skip the files which could not be loaded and continue the load process. loader = DirectoryLoader( path, glob="**/*.txt", loader_cls=TextLoader, silent_errors=True)docs = loader.load() Error loading file ../../../../libs/langchain/tests/unit_tests/examples/example-non-utf8.txt: Error loading ../../../../libs/langchain/tests/unit_tests/examples/example-non-utf8.txt doc_sources = [doc.metadata["source"] for doc in docs]doc_sources ['../../../../libs/langchain/tests/unit_tests/examples/example-utf8.txt'] ### C. Auto detect encodings[​](#c-auto-detect-encodings "Direct link to C. Auto detect encodings") We can also ask `TextLoader` to auto detect the file encoding before failing, by passing the `autodetect_encoding` to the loader class. text_loader_kwargs = {"autodetect_encoding": True}loader = DirectoryLoader( path, glob="**/*.txt", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)docs = loader.load() doc_sources = [doc.metadata["source"] for doc in docs]doc_sources ['../../../../libs/langchain/tests/unit_tests/examples/example-utf8.txt', '../../../../libs/langchain/tests/unit_tests/examples/example-non-utf8.txt'] [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/document_loader_directory.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to load CSVs ](/v0.2/docs/how_to/document_loader_csv/)[ Next How to load HTML ](/v0.2/docs/how_to/document_loader_html/) * [Show a progress bar](#show-a-progress-bar) * [Use multithreading](#use-multithreading) * [Change loader class](#change-loader-class) * [Auto-detect file encodings with TextLoader](#auto-detect-file-encodings-with-textloader) * [A. Default Behavior](#a-default-behavior) * [B. Silent fail](#b-silent-fail) * [C. Auto detect encodings](#c-auto-detect-encodings)
https://python.langchain.com/v0.2/docs/how_to/callbacks_attach/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to attach callbacks to a runnable On this page How to attach callbacks to a runnable ===================================== Prerequisites This guide assumes familiarity with the following concepts: * [Callbacks](/v0.2/docs/concepts/#callbacks) * [Custom callback handlers](/v0.2/docs/how_to/custom_callbacks/) * [Chaining runnables](/v0.2/docs/how_to/sequence/) * [Attach runtime arguments to a Runnable](/v0.2/docs/how_to/binding/) If you are composing a chain of runnables and want to reuse callbacks across multiple executions, you can attach callbacks with the [`.with_config()`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config) method. This saves you the need to pass callbacks in each time you invoke the chain. info `with_config()` binds a configuration which will be interpreted as **runtime** configuration. So these callbacks will propagate to all child components. Here's an example: from typing import Any, Dict, Listfrom langchain_anthropic import ChatAnthropicfrom langchain_core.callbacks import BaseCallbackHandlerfrom langchain_core.messages import BaseMessagefrom langchain_core.outputs import LLMResultfrom langchain_core.prompts import ChatPromptTemplateclass LoggingHandler(BaseCallbackHandler): def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs ) -> None: print("Chat model started") def on_llm_end(self, response: LLMResult, **kwargs) -> None: print(f"Chat model ended, response: {response}") def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs ) -> None: print(f"Chain {serialized.get('name')} started") def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None: print(f"Chain ended, outputs: {outputs}")callbacks = [LoggingHandler()]llm = ChatAnthropic(model="claude-3-sonnet-20240229")prompt = ChatPromptTemplate.from_template("What is 1 + {number}?")chain = prompt | llmchain_with_callbacks = chain.with_config(callbacks=callbacks)chain_with_callbacks.invoke({"number": "2"}) **API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) | [BaseCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html) | [BaseMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.base.BaseMessage.html) | [LLMResult](https://api.python.langchain.com/en/latest/outputs/langchain_core.outputs.llm_result.LLMResult.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) Chain RunnableSequence startedChain ChatPromptTemplate startedChain ended, outputs: messages=[HumanMessage(content='What is 1 + 2?')]Chat model startedChat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-d6bcfd72-9c94-466d-bac0-f39e456ad6e3-0'))]] llm_output={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=NoneChain ended, outputs: content='1 + 2 = 3' 
response_metadata={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} id='run-d6bcfd72-9c94-466d-bac0-f39e456ad6e3-0' AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-d6bcfd72-9c94-466d-bac0-f39e456ad6e3-0') The bound callbacks will run for all nested module runs. Next steps[​](#next-steps "Direct link to Next steps") ------------------------------------------------------ You've now learned how to attach callbacks to a chain. Next, check out the other how-to guides in this section, such as how to [pass callbacks in at runtime](/v0.2/docs/how_to/callbacks_runtime/). [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/callbacks_attach.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to use callbacks in async environments ](/v0.2/docs/how_to/callbacks_async/)[ Next How to propagate callbacks constructor ](/v0.2/docs/how_to/callbacks_constructor/) * [Next steps](#next-steps)
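To make the reuse aspect concrete, here is a minimal self-contained sketch using a toy chain of `RunnableLambda`s so it runs without any API key; the `PrintHandler` class and the arithmetic chain are illustrative, not part of the original example. The handler is bound once with `.with_config()` and then fires for every subsequent invocation, including the nested runs.

```python
from typing import Any, Dict

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.runnables import RunnableLambda


class PrintHandler(BaseCallbackHandler):
    def on_chain_start(self, serialized: Dict[str, Any], inputs: Any, **kwargs: Any) -> None:
        print(f"chain started with inputs: {inputs}")

    def on_chain_end(self, outputs: Any, **kwargs: Any) -> None:
        print(f"chain ended with outputs: {outputs}")


chain = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)

# Bind the handler once; no need to pass callbacks on each invoke.
chain_with_callbacks = chain.with_config(callbacks=[PrintHandler()])

for value in [1, 2, 3]:
    chain_with_callbacks.invoke(value)
```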
https://python.langchain.com/v0.2/docs/how_to/document_loader_html/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to load HTML On this page How to load HTML ================ The HyperText Markup Language or [HTML](https://en.wikipedia.org/wiki/HTML) is the standard markup language for documents designed to be displayed in a web browser. This covers how to load `HTML` documents into a LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects that we can use downstream. Parsing HTML files often requires specialized tools. Here we demonstrate parsing via [Unstructured](https://unstructured-io.github.io/unstructured/) and [BeautifulSoup4](https://beautiful-soup-4.readthedocs.io/en/latest/), which can be installed via pip. Head over to the integrations page to find integrations with additional services, such as [Azure AI Document Intelligence](/v0.2/docs/integrations/document_loaders/azure_document_intelligence/) or [FireCrawl](/v0.2/docs/integrations/document_loaders/firecrawl/). Loading HTML with Unstructured[​](#loading-html-with-unstructured "Direct link to Loading HTML with Unstructured") ------------------------------------------------------------------------------------------------------------------ %pip install "unstructured[html]" from langchain_community.document_loaders import UnstructuredHTMLLoaderfile_path = "../../../docs/integrations/document_loaders/example_data/fake-content.html"loader = UnstructuredHTMLLoader(file_path)data = loader.load()print(data) **API Reference:**[UnstructuredHTMLLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.html.UnstructuredHTMLLoader.html) [Document(page_content='My First Heading\n\nMy first paragraph.', metadata={'source': '../../../docs/integrations/document_loaders/example_data/fake-content.html'})] Loading HTML with BeautifulSoup4[​](#loading-html-with-beautifulsoup4 "Direct link to Loading HTML with BeautifulSoup4") ------------------------------------------------------------------------------------------------------------------------ We can also use `BeautifulSoup4` to load HTML documents using the `BSHTMLLoader`. This will extract the text from the HTML into `page_content`, and the page title as `title` into `metadata`. %pip install bs4 from langchain_community.document_loaders import BSHTMLLoaderloader = BSHTMLLoader(file_path)data = loader.load()print(data) **API Reference:**[BSHTMLLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.html_bs.BSHTMLLoader.html) [Document(page_content='\nTest Title\n\n\nMy First Heading\nMy first paragraph.\n\n\n', metadata={'source': '../../../docs/integrations/document_loaders/example_data/fake-content.html', 'title': 'Test Title'})] [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/document_loader_html.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to load documents from a directory ](/v0.2/docs/how_to/document_loader_directory/)[ Next How to load JSON ](/v0.2/docs/how_to/document_loader_json/) * [Loading HTML with Unstructured](#loading-html-with-unstructured) * [Loading HTML with BeautifulSoup4](#loading-html-with-beautifulsoup4)
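As a small follow-up to the `BSHTMLLoader` example above (reusing the same example file), the page title it records can be read back from each document's metadata alongside the extracted text.

```python
from langchain_community.document_loaders import BSHTMLLoader

file_path = "../../../docs/integrations/document_loaders/example_data/fake-content.html"

loader = BSHTMLLoader(file_path)
docs = loader.load()

# BSHTMLLoader puts the visible text into page_content and the <title>
# under the "title" metadata key.
for doc in docs:
    print(doc.metadata["title"])
    print(doc.page_content.strip())
```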
https://python.langchain.com/v0.2/docs/how_to/callbacks_constructor/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to propagate callbacks constructor On this page How to propagate callbacks constructor ====================================== Prerequisites This guide assumes familiarity with the following concepts: * [Callbacks](/v0.2/docs/concepts/#callbacks) * [Custom callback handlers](/v0.2/docs/how_to/custom_callbacks/) Most LangChain modules allow you to pass `callbacks` directly into the constructor (i.e., initializer). In this case, the callbacks will only be called for that instance (and any nested runs). danger Constructor callbacks are scoped only to the object they are defined on. They are **not** inherited by children of the object. This can lead to confusing behavior, and it's generally better to pass callbacks as a run time argument. Here's an example: from typing import Any, Dict, Listfrom langchain_anthropic import ChatAnthropicfrom langchain_core.callbacks import BaseCallbackHandlerfrom langchain_core.messages import BaseMessagefrom langchain_core.outputs import LLMResultfrom langchain_core.prompts import ChatPromptTemplateclass LoggingHandler(BaseCallbackHandler): def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs ) -> None: print("Chat model started") def on_llm_end(self, response: LLMResult, **kwargs) -> None: print(f"Chat model ended, response: {response}") def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs ) -> None: print(f"Chain {serialized.get('name')} started") def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None: print(f"Chain ended, outputs: {outputs}")callbacks = [LoggingHandler()]llm = ChatAnthropic(model="claude-3-sonnet-20240229", callbacks=callbacks)prompt = ChatPromptTemplate.from_template("What is 1 + {number}?")chain = prompt | llmchain.invoke({"number": "2"}) **API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) | [BaseCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html) | [BaseMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.base.BaseMessage.html) | [LLMResult](https://api.python.langchain.com/en/latest/outputs/langchain_core.outputs.llm_result.LLMResult.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) Chat model startedChat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-2d7fdf2a-7405-4e17-97c0-67e6b2a65305-0'))]] llm_output={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=None AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-2d7fdf2a-7405-4e17-97c0-67e6b2a65305-0') You can see that we only see events from the chat model run - no chain events from the prompt or broader chain. 
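To see the difference in scope directly, here is a hedged sketch that reuses the `LoggingHandler` defined above and, like the example above, assumes an Anthropic API key is configured. The first invocation passes the handler to the model's constructor, so only chat model events are logged; the second passes it at runtime via `config`, so it propagates to the prompt, the sequence, and the model alike.

```python
from langchain_anthropic import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate

handler = LoggingHandler()  # defined in the example above
prompt = ChatPromptTemplate.from_template("What is 1 + {number}?")

# Constructor callbacks: scoped to the chat model only.
llm_with_callbacks = ChatAnthropic(model="claude-3-sonnet-20240229", callbacks=[handler])
(prompt | llm_with_callbacks).invoke({"number": "2"})

# Runtime callbacks: inherited by all components in the chain.
llm = ChatAnthropic(model="claude-3-sonnet-20240229")
(prompt | llm).invoke({"number": "2"}, config={"callbacks": [handler]})
```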
Next steps[​](#next-steps "Direct link to Next steps") ------------------------------------------------------ You've now learned how to pass callbacks into a constructor. Next, check out the other how-to guides in this section, such as how to [pass callbacks at runtime](/v0.2/docs/how_to/callbacks_runtime/).
https://python.langchain.com/v0.2/docs/how_to/callbacks_runtime/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to pass callbacks in at runtime On this page How to pass callbacks in at runtime =================================== Prerequisites This guide assumes familiarity with the following concepts: * [Callbacks](/v0.2/docs/concepts/#callbacks) * [Custom callback handlers](/v0.2/docs/how_to/custom_callbacks/) In many cases, it is advantageous to pass in handlers instead when running the object. When we pass through [`CallbackHandlers`](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) using the `callbacks` keyword arg when executing an run, those callbacks will be issued by all nested objects involved in the execution. For example, when a handler is passed through to an Agent, it will be used for all callbacks related to the agent and all the objects involved in the agent's execution, in this case, the Tools and LLM. This prevents us from having to manually attach the handlers to each individual nested object. Here's an example: from typing import Any, Dict, Listfrom langchain_anthropic import ChatAnthropicfrom langchain_core.callbacks import BaseCallbackHandlerfrom langchain_core.messages import BaseMessagefrom langchain_core.outputs import LLMResultfrom langchain_core.prompts import ChatPromptTemplateclass LoggingHandler(BaseCallbackHandler): def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs ) -> None: print("Chat model started") def on_llm_end(self, response: LLMResult, **kwargs) -> None: print(f"Chat model ended, response: {response}") def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs ) -> None: print(f"Chain {serialized.get('name')} started") def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None: print(f"Chain ended, outputs: {outputs}")callbacks = [LoggingHandler()]llm = ChatAnthropic(model="claude-3-sonnet-20240229")prompt = ChatPromptTemplate.from_template("What is 1 + {number}?")chain = prompt | llmchain.invoke({"number": "2"}, config={"callbacks": callbacks}) **API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) | [BaseCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html) | [BaseMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.base.BaseMessage.html) | [LLMResult](https://api.python.langchain.com/en/latest/outputs/langchain_core.outputs.llm_result.LLMResult.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) Chain RunnableSequence startedChain ChatPromptTemplate startedChain ended, outputs: messages=[HumanMessage(content='What is 1 + 2?')]Chat model startedChat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-bb0dddd8-85f3-4e6b-8553-eaa79f859ef8-0'))]] llm_output={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=NoneChain ended, outputs: content='1 + 2 = 3' 
response_metadata={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} id='run-bb0dddd8-85f3-4e6b-8553-eaa79f859ef8-0' AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-bb0dddd8-85f3-4e6b-8553-eaa79f859ef8-0') If there are already existing callbacks associated with a module, these will run in addition to any passed in at runtime. Next steps[​](#next-steps "Direct link to Next steps") ------------------------------------------------------ You've now learned how to pass callbacks at runtime. Next, check out the other how-to guides in this section, such as how to [pass callbacks into a module constructor](/v0.2/docs/how_to/custom_callbacks/). [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/callbacks_runtime.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to propagate callbacks constructor ](/v0.2/docs/how_to/callbacks_constructor/)[ Next How to split by character ](/v0.2/docs/how_to/character_text_splitter/) * [Next steps](#next-steps)
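To make that note about pre-existing callbacks concrete, here is a minimal self-contained sketch with a toy `RunnableLambda` chain, so it runs without an API key; the `NamedHandler` class is illustrative. One handler is attached with `.with_config()` and another is passed in at runtime, and both should fire for the same run.

```python
from typing import Any, Dict

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.runnables import RunnableLambda


class NamedHandler(BaseCallbackHandler):
    def __init__(self, name: str) -> None:
        self.name = name

    def on_chain_start(self, serialized: Dict[str, Any], inputs: Any, **kwargs: Any) -> None:
        print(f"[{self.name}] chain started")


# Handler attached to the runnable itself.
chain = RunnableLambda(lambda x: x * 2).with_config(callbacks=[NamedHandler("attached")])

# Handler passed in at runtime; both the attached and the runtime handler run.
chain.invoke(3, config={"callbacks": [NamedHandler("runtime")]})
```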
https://python.langchain.com/v0.2/docs/how_to/document_loader_markdown/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to load Markdown On this page How to load Markdown ==================== [Markdown](https://en.wikipedia.org/wiki/Markdown) is a lightweight markup language for creating formatted text using a plain-text editor. Here we cover how to load `Markdown` documents into LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects that we can use downstream. We will cover: * Basic usage; * Parsing of Markdown into elements such as titles, list items, and text. LangChain implements an [UnstructuredMarkdownLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.markdown.UnstructuredMarkdownLoader.html) object which requires the [Unstructured](https://unstructured-io.github.io/unstructured/) package. First we install it: # !pip install "unstructured[md]" Basic usage will ingest a Markdown file to a single document. Here we demonstrate on LangChain's readme: from langchain_community.document_loaders import UnstructuredMarkdownLoaderfrom langchain_core.documents import Documentmarkdown_path = "../../../../README.md"loader = UnstructuredMarkdownLoader(markdown_path)data = loader.load()assert len(data) == 1assert isinstance(data[0], Document)readme_content = data[0].page_contentprint(readme_content[:250]) **API Reference:**[UnstructuredMarkdownLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.markdown.UnstructuredMarkdownLoader.html) | [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) 🦜️🔗 LangChain⚡ Build context-aware reasoning applications ⚡Looking for the JS/TS library? Check out LangChain.js.To help you ship LangChain apps to production faster, check out LangSmith. LangSmith is a unified developer platform for building, Retain Elements[​](#retain-elements "Direct link to Retain Elements") --------------------------------------------------------------------- Under the hood, Unstructured creates different "elements" for different chunks of text. By default we combine those together, but you can easily keep that separation by specifying `mode="elements"`. loader = UnstructuredMarkdownLoader(markdown_path, mode="elements")data = loader.load()print(f"Number of documents: {len(data)}\n")for document in data[:2]: print(f"{document}\n") Number of documents: 65page_content='🦜️🔗 LangChain' metadata={'source': '../../../../README.md', 'last_modified': '2024-04-29T13:40:19', 'page_number': 1, 'languages': ['eng'], 'filetype': 'text/markdown', 'file_directory': '../../../..', 'filename': 'README.md', 'category': 'Title'}page_content='⚡ Build context-aware reasoning applications ⚡' metadata={'source': '../../../../README.md', 'last_modified': '2024-04-29T13:40:19', 'page_number': 1, 'languages': ['eng'], 'parent_id': 'c3223b6f7100be08a78f1e8c0c28fde1', 'filetype': 'text/markdown', 'file_directory': '../../../..', 'filename': 'README.md', 'category': 'NarrativeText'} Note that in this case we recover three distinct element types: print(set(document.metadata["category"] for document in data)) {'Title', 'NarrativeText', 'ListItem'} [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/document_loader_markdown.ipynb) * * * #### Was this page helpful? 
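Building on the `mode="elements"` example above, here is a small sketch that filters the parsed elements by their `category` metadata, keeping only the section titles (the `titles` variable name is illustrative):

```python
from langchain_community.document_loaders import UnstructuredMarkdownLoader

markdown_path = "../../../../README.md"

loader = UnstructuredMarkdownLoader(markdown_path, mode="elements")
data = loader.load()

# Each element carries a "category" such as Title, NarrativeText, or ListItem.
titles = [doc.page_content for doc in data if doc.metadata["category"] == "Title"]
print(titles[:5])
```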
https://python.langchain.com/v0.2/docs/how_to/document_loader_json/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to load JSON On this page How to load JSON ================ [JSON (JavaScript Object Notation)](https://en.wikipedia.org/wiki/JSON) is an open standard file format and data interchange format that uses human-readable text to store and transmit data objects consisting of attribute–value pairs and arrays (or other serializable values). [JSON Lines](https://jsonlines.org/) is a file format where each line is a valid JSON value. LangChain implements a [JSONLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.json_loader.JSONLoader.html) to convert JSON and JSONL data into LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects. It uses a specified [jq schema](https://en.wikipedia.org/wiki/Jq_\(programming_language\)) to parse the JSON files, allowing for the extraction of specific fields into the content and metadata of the LangChain Document. It uses the `jq` python package. Check out this [manual](https://stedolan.github.io/jq/manual/#Basicfilters) for a detailed documentation of the `jq` syntax. Here we will demonstrate: * How to load JSON and JSONL data into the content of a LangChain `Document`; * How to load JSON and JSONL data into metadata associated with a `Document`. #!pip install jq from langchain_community.document_loaders import JSONLoader **API Reference:**[JSONLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.json_loader.JSONLoader.html) import jsonfrom pathlib import Pathfrom pprint import pprintfile_path='./example_data/facebook_chat.json'data = json.loads(Path(file_path).read_text()) pprint(data) {'image': {'creation_timestamp': 1675549016, 'uri': 'image_of_the_chat.jpg'}, 'is_still_participant': True, 'joinable_mode': {'link': '', 'mode': 1}, 'magic_words': [], 'messages': [{'content': 'Bye!', 'sender_name': 'User 2', 'timestamp_ms': 1675597571851}, {'content': 'Oh no worries! Bye', 'sender_name': 'User 1', 'timestamp_ms': 1675597435669}, {'content': 'No Im sorry it was my mistake, the blue one is not ' 'for sale', 'sender_name': 'User 2', 'timestamp_ms': 1675596277579}, {'content': 'I thought you were selling the blue one!', 'sender_name': 'User 1', 'timestamp_ms': 1675595140251}, {'content': 'Im not interested in this bag. Im interested in the ' 'blue one!', 'sender_name': 'User 1', 'timestamp_ms': 1675595109305}, {'content': 'Here is $129', 'sender_name': 'User 2', 'timestamp_ms': 1675595068468}, {'photos': [{'creation_timestamp': 1675595059, 'uri': 'url_of_some_picture.jpg'}], 'sender_name': 'User 2', 'timestamp_ms': 1675595060730}, {'content': 'Online is at least $100', 'sender_name': 'User 2', 'timestamp_ms': 1675595045152}, {'content': 'How much do you want?', 'sender_name': 'User 1', 'timestamp_ms': 1675594799696}, {'content': 'Goodmorning! $50 is too low.', 'sender_name': 'User 2', 'timestamp_ms': 1675577876645}, {'content': 'Hi! Im interested in your bag. Im offering $50. Let ' 'me know if you are interested. 
Thanks!', 'sender_name': 'User 1', 'timestamp_ms': 1675549022673}], 'participants': [{'name': 'User 1'}, {'name': 'User 2'}], 'thread_path': 'inbox/User 1 and User 2 chat', 'title': 'User 1 and User 2 chat'} Using `JSONLoader`[​](#using-jsonloader "Direct link to using-jsonloader") -------------------------------------------------------------------------- Suppose we are interested in extracting the values under the `content` field within the `messages` key of the JSON data. This can easily be done through the `JSONLoader` as shown below. ### JSON file[​](#json-file "Direct link to JSON file") loader = JSONLoader( file_path='./example_data/facebook_chat.json', jq_schema='.messages[].content', text_content=False)data = loader.load() pprint(data) [Document(page_content='Bye!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 1}), Document(page_content='Oh no worries! Bye', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 2}), Document(page_content='No Im sorry it was my mistake, the blue one is not for sale', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 3}), Document(page_content='I thought you were selling the blue one!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 4}), Document(page_content='Im not interested in this bag. Im interested in the blue one!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 5}), Document(page_content='Here is $129', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 6}), Document(page_content='', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 7}), Document(page_content='Online is at least $100', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 8}), Document(page_content='How much do you want?', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 9}), Document(page_content='Goodmorning! $50 is too low.', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 10}), Document(page_content='Hi! Im interested in your bag. Im offering $50. Let me know if you are interested. Thanks!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 11})] ### JSON Lines file[​](#json-lines-file "Direct link to JSON Lines file") If you want to load documents from a JSON Lines file, you pass `json_lines=True` and specify `jq_schema` to extract `page_content` from a single JSON object. file_path = './example_data/facebook_chat_messages.jsonl'pprint(Path(file_path).read_text()) ('{"sender_name": "User 2", "timestamp_ms": 1675597571851, "content": "Bye!"}\n' '{"sender_name": "User 1", "timestamp_ms": 1675597435669, "content": "Oh no ' 'worries! 
Bye"}\n' '{"sender_name": "User 2", "timestamp_ms": 1675596277579, "content": "No Im ' 'sorry it was my mistake, the blue one is not for sale"}\n') loader = JSONLoader( file_path='./example_data/facebook_chat_messages.jsonl', jq_schema='.content', text_content=False, json_lines=True)data = loader.load() pprint(data) [Document(page_content='Bye!', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat_messages.jsonl', 'seq_num': 1}), Document(page_content='Oh no worries! Bye', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat_messages.jsonl', 'seq_num': 2}), Document(page_content='No Im sorry it was my mistake, the blue one is not for sale', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat_messages.jsonl', 'seq_num': 3})] Another option is set `jq_schema='.'` and provide `content_key`: loader = JSONLoader( file_path='./example_data/facebook_chat_messages.jsonl', jq_schema='.', content_key='sender_name', json_lines=True)data = loader.load() pprint(data) [Document(page_content='User 2', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat_messages.jsonl', 'seq_num': 1}), Document(page_content='User 1', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat_messages.jsonl', 'seq_num': 2}), Document(page_content='User 2', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat_messages.jsonl', 'seq_num': 3})] ### JSON file with jq schema `content_key`[​](#json-file-with-jq-schema-content_key "Direct link to json-file-with-jq-schema-content_key") To load documents from a JSON file using the content\_key within the jq schema, set is\_content\_key\_jq\_parsable=True. Ensure that content\_key is compatible and can be parsed using the jq schema. file_path = './sample.json'pprint(Path(file_path).read_text()) {"data": [ {"attributes": { "message": "message1", "tags": [ "tag1"]}, "id": "1"}, {"attributes": { "message": "message2", "tags": [ "tag2"]}, "id": "2"}]} loader = JSONLoader( file_path=file_path, jq_schema=".data[]", content_key=".attributes.message", is_content_key_jq_parsable=True,)data = loader.load() pprint(data) [Document(page_content='message1', metadata={'source': '/path/to/sample.json', 'seq_num': 1}), Document(page_content='message2', metadata={'source': '/path/to/sample.json', 'seq_num': 2})] Extracting metadata[​](#extracting-metadata "Direct link to Extracting metadata") --------------------------------------------------------------------------------- Generally, we want to include metadata available in the JSON file into the documents that we create from the content. The following demonstrates how metadata can be extracted using the `JSONLoader`. There are some key changes to be noted. In the previous example where we didn't collect the metadata, we managed to directly specify in the schema where the value for the `page_content` can be extracted from. .messages[].content In the current example, we have to tell the loader to iterate over the records in the `messages` field. The jq\_schema then has to be: .messages[] This allows us to pass the records (dict) into the `metadata_func` that has to be implemented. The `metadata_func` is responsible for identifying which pieces of information in the record should be included in the metadata stored in the final `Document` object. 
Additionally, we now have to explicitly specify in the loader, via the `content_key` argument, the key from the record where the value for the `page_content` needs to be extracted from. # Define the metadata extraction function.def metadata_func(record: dict, metadata: dict) -> dict: metadata["sender_name"] = record.get("sender_name") metadata["timestamp_ms"] = record.get("timestamp_ms") return metadataloader = JSONLoader( file_path='./example_data/facebook_chat.json', jq_schema='.messages[]', content_key="content", metadata_func=metadata_func)data = loader.load() pprint(data) [Document(page_content='Bye!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 1, 'sender_name': 'User 2', 'timestamp_ms': 1675597571851}), Document(page_content='Oh no worries! Bye', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 2, 'sender_name': 'User 1', 'timestamp_ms': 1675597435669}), Document(page_content='No Im sorry it was my mistake, the blue one is not for sale', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 3, 'sender_name': 'User 2', 'timestamp_ms': 1675596277579}), Document(page_content='I thought you were selling the blue one!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 4, 'sender_name': 'User 1', 'timestamp_ms': 1675595140251}), Document(page_content='Im not interested in this bag. Im interested in the blue one!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 5, 'sender_name': 'User 1', 'timestamp_ms': 1675595109305}), Document(page_content='Here is $129', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 6, 'sender_name': 'User 2', 'timestamp_ms': 1675595068468}), Document(page_content='', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 7, 'sender_name': 'User 2', 'timestamp_ms': 1675595060730}), Document(page_content='Online is at least $100', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 8, 'sender_name': 'User 2', 'timestamp_ms': 1675595045152}), Document(page_content='How much do you want?', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 9, 'sender_name': 'User 1', 'timestamp_ms': 1675594799696}), Document(page_content='Goodmorning! $50 is too low.', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 10, 'sender_name': 'User 2', 'timestamp_ms': 1675577876645}), Document(page_content='Hi! Im interested in your bag. Im offering $50. Let me know if you are interested. 
Thanks!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 11, 'sender_name': 'User 1', 'timestamp_ms': 1675549022673})] Now, you will see that the documents contain the metadata associated with the content we extracted. The `metadata_func`[​](#the-metadata_func "Direct link to the-metadata_func") ----------------------------------------------------------------------------- As shown above, the `metadata_func` accepts the default metadata generated by the `JSONLoader`. This allows full control to the user with respect to how the metadata is formatted. For example, the default metadata contains the `source` and the `seq_num` keys. However, it is possible that the JSON data contain these keys as well. The user can then exploit the `metadata_func` to rename the default keys and use the ones from the JSON data. The example below shows how we can modify the `source` to only contain information of the file source relative to the `langchain` directory. # Define the metadata extraction function.def metadata_func(record: dict, metadata: dict) -> dict: metadata["sender_name"] = record.get("sender_name") metadata["timestamp_ms"] = record.get("timestamp_ms") if "source" in metadata: source = metadata["source"].split("/") source = source[source.index("langchain"):] metadata["source"] = "/".join(source) return metadataloader = JSONLoader( file_path='./example_data/facebook_chat.json', jq_schema='.messages[]', content_key="content", metadata_func=metadata_func)data = loader.load() pprint(data) [Document(page_content='Bye!', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 1, 'sender_name': 'User 2', 'timestamp_ms': 1675597571851}), Document(page_content='Oh no worries! Bye', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 2, 'sender_name': 'User 1', 'timestamp_ms': 1675597435669}), Document(page_content='No Im sorry it was my mistake, the blue one is not for sale', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 3, 'sender_name': 'User 2', 'timestamp_ms': 1675596277579}), Document(page_content='I thought you were selling the blue one!', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 4, 'sender_name': 'User 1', 'timestamp_ms': 1675595140251}), Document(page_content='Im not interested in this bag. 
Im interested in the blue one!', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 5, 'sender_name': 'User 1', 'timestamp_ms': 1675595109305}),
 Document(page_content='Here is $129', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 6, 'sender_name': 'User 2', 'timestamp_ms': 1675595068468}),
 Document(page_content='', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 7, 'sender_name': 'User 2', 'timestamp_ms': 1675595060730}),
 Document(page_content='Online is at least $100', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 8, 'sender_name': 'User 2', 'timestamp_ms': 1675595045152}),
 Document(page_content='How much do you want?', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 9, 'sender_name': 'User 1', 'timestamp_ms': 1675594799696}),
 Document(page_content='Goodmorning! $50 is too low.', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 10, 'sender_name': 'User 2', 'timestamp_ms': 1675577876645}),
 Document(page_content='Hi! Im interested in your bag. Im offering $50. Let me know if you are interested. Thanks!', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 11, 'sender_name': 'User 1', 'timestamp_ms': 1675549022673})]

Common JSON structures with jq schema[​](#common-json-structures-with-jq-schema "Direct link to Common JSON structures with jq schema")
---------------------------------------------------------------------------------------------------------------------------------------

The list below provides a reference to the possible `jq_schema` the user can use to extract content from the JSON data depending on the structure.

JSON -> [{"text": ...}, {"text": ...}, {"text": ...}]
jq_schema -> ".[].text"

JSON -> {"key": [{"text": ...}, {"text": ...}, {"text": ...}]}
jq_schema -> ".key[].text"

JSON -> ["...", "...", "..."]
jq_schema -> ".[]"
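To make the reference above concrete, here is a minimal sketch for the first structure. The file name `notes.json` and its contents are hypothetical; only the `jq_schema` value comes from the list above.

```python
from langchain_community.document_loaders import JSONLoader

# Hypothetical file shaped like: [{"text": "..."}, {"text": "..."}]
# Requires the `jq` Python package (`pip install jq`), as for the other examples.
loader = JSONLoader(
    file_path="./example_data/notes.json",  # illustrative path, not part of the original example data
    jq_schema=".[].text",
    text_content=False,
)
docs = loader.load()
# Each element's "text" value becomes the page_content of one Document.
```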
https://python.langchain.com/v0.2/docs/how_to/character_text_splitter/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to split by character How to split by character ========================= This is the simplest method. This splits based on a given character sequence, which defaults to `"\n\n"`. Chunk length is measured by number of characters. 1. How the text is split: by single character separator. 2. How the chunk size is measured: by number of characters. To obtain the string content directly, use `.split_text`. To create LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) objects (e.g., for use in downstream tasks), use `.create_documents`. %pip install -qU langchain-text-splitters from langchain_text_splitters import CharacterTextSplitter# Load an example documentwith open("state_of_the_union.txt") as f: state_of_the_union = f.read()text_splitter = CharacterTextSplitter( separator="\n\n", chunk_size=1000, chunk_overlap=200, length_function=len, is_separator_regex=False,)texts = text_splitter.create_documents([state_of_the_union])print(texts[0]) **API Reference:**[CharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.CharacterTextSplitter.html) page_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world.' Use `.create_documents` to propagate metadata associated with each document to the output chunks: metadatas = [{"document": 1}, {"document": 2}]documents = text_splitter.create_documents( [state_of_the_union, state_of_the_union], metadatas=metadatas)print(documents[0]) page_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world.' 
metadata={'document': 1}

Use `.split_text` to obtain the string content directly:

text_splitter.split_text(state_of_the_union)[0]

'Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world.'
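If your text is already wrapped in `Document` objects (for example, from a document loader), `.split_documents` applies the same splitting logic and carries each document's metadata into its chunks. A minimal sketch, using a made-up in-memory document:

```python
from langchain_core.documents import Document
from langchain_text_splitters import CharacterTextSplitter

docs = [
    Document(
        page_content="First section.\n\nSecond section.",
        metadata={"source": "notes.txt"},  # hypothetical source
    )
]

text_splitter = CharacterTextSplitter(separator="\n\n", chunk_size=20, chunk_overlap=0)
chunks = text_splitter.split_documents(docs)
# Produces two chunks; both keep metadata={'source': 'notes.txt'}
```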
https://python.langchain.com/v0.2/docs/how_to/chat_model_caching/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to cache chat model responses On this page How to cache chat model responses ================================= Prerequisites This guide assumes familiarity with the following concepts: * [Chat models](/v0.2/docs/concepts/#chat-models) * [LLMs](/v0.2/docs/concepts/#llms) LangChain provides an optional caching layer for chat models. This is useful for two main reasons: * It can save you money by reducing the number of API calls you make to the LLM provider, if you're often requesting the same completion multiple times. This is especially useful during app development. * It can speed up your application by reducing the number of API calls you make to the LLM provider. This guide will walk you through how to enable this in your apps. * OpenAI * Anthropic * Azure * Google * Cohere * FireworksAI * Groq * MistralAI * TogetherAI pip install -qU langchain-openai import getpassimport osos.environ["OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI(model="gpt-3.5-turbo-0125") pip install -qU langchain-anthropic import getpassimport osos.environ["ANTHROPIC_API_KEY"] = getpass.getpass()from langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model="claude-3-sonnet-20240229") pip install -qU langchain-openai import getpassimport osos.environ["AZURE_OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import AzureChatOpenAIllm = AzureChatOpenAI( azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"], openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],) pip install -qU langchain-google-vertexai import getpassimport osos.environ["GOOGLE_API_KEY"] = getpass.getpass()from langchain_google_vertexai import ChatVertexAIllm = ChatVertexAI(model="gemini-pro") pip install -qU langchain-cohere import getpassimport osos.environ["COHERE_API_KEY"] = getpass.getpass()from langchain_cohere import ChatCoherellm = ChatCohere(model="command-r") pip install -qU langchain-fireworks import getpassimport osos.environ["FIREWORKS_API_KEY"] = getpass.getpass()from langchain_fireworks import ChatFireworksllm = ChatFireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct") pip install -qU langchain-groq import getpassimport osos.environ["GROQ_API_KEY"] = getpass.getpass()from langchain_groq import ChatGroqllm = ChatGroq(model="llama3-8b-8192") pip install -qU langchain-mistralai import getpassimport osos.environ["MISTRAL_API_KEY"] = getpass.getpass()from langchain_mistralai import ChatMistralAIllm = ChatMistralAI(model="mistral-large-latest") pip install -qU langchain-openai import getpassimport osos.environ["TOGETHER_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI( base_url="https://api.together.xyz/v1", api_key=os.environ["TOGETHER_API_KEY"], model="mistralai/Mixtral-8x7B-Instruct-v0.1",) # <!-- ruff: noqa: F821 -->from langchain.globals import set_llm_cache **API Reference:**[set\_llm\_cache](https://api.python.langchain.com/en/latest/globals/langchain.globals.set_llm_cache.html) In Memory Cache[​](#in-memory-cache "Direct link to In Memory Cache") --------------------------------------------------------------------- This is an ephemeral cache that stores model calls in memory. It will be wiped when your environment restarts, and is not shared across processes. 
%%timefrom langchain.cache import InMemoryCacheset_llm_cache(InMemoryCache())# The first time, it is not yet in cache, so it should take longerllm.invoke("Tell me a joke") **API Reference:**[InMemoryCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.InMemoryCache.html) CPU times: user 645 ms, sys: 214 ms, total: 859 msWall time: 829 ms AIMessage(content="Why don't scientists trust atoms?\n\nBecause they make up everything!", response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 11, 'total_tokens': 24}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-b6836bdd-8c30-436b-828f-0ac5fc9ab50e-0') %%time# The second time it is, so it goes fasterllm.invoke("Tell me a joke") CPU times: user 822 µs, sys: 288 µs, total: 1.11 msWall time: 1.06 ms AIMessage(content="Why don't scientists trust atoms?\n\nBecause they make up everything!", response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 11, 'total_tokens': 24}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-b6836bdd-8c30-436b-828f-0ac5fc9ab50e-0') SQLite Cache[​](#sqlite-cache "Direct link to SQLite Cache") ------------------------------------------------------------ This cache implementation uses a `SQLite` database to store responses, and will last across process restarts. !rm .langchain.db # We can do the same thing with a SQLite cachefrom langchain_community.cache import SQLiteCacheset_llm_cache(SQLiteCache(database_path=".langchain.db")) **API Reference:**[SQLiteCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.SQLiteCache.html) %%time# The first time, it is not yet in cache, so it should take longerllm.invoke("Tell me a joke") CPU times: user 9.91 ms, sys: 7.68 ms, total: 17.6 msWall time: 657 ms AIMessage(content='Why did the scarecrow win an award? Because he was outstanding in his field!', response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 11, 'total_tokens': 28}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-39d9e1e8-7766-4970-b1d8-f50213fd94c5-0') %%time# The second time it is, so it goes fasterllm.invoke("Tell me a joke") CPU times: user 52.2 ms, sys: 60.5 ms, total: 113 msWall time: 127 ms AIMessage(content='Why did the scarecrow win an award? Because he was outstanding in his field!', id='run-39d9e1e8-7766-4970-b1d8-f50213fd94c5-0') Next steps[​](#next-steps "Direct link to Next steps") ------------------------------------------------------ You've now learned how to cache model responses to save time and money. Next, check out the other how-to guides chat models in this section, like [how to get a model to return structured output](/v0.2/docs/how_to/structured_output/) or [how to create your own custom chat model](/v0.2/docs/how_to/custom_chat_model/). [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/chat_model_caching.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). 
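One detail the examples above do not show: the same `set_llm_cache` helper also accepts `None`, which switches global caching back off, for example when you want fresh completions while iterating on prompts. A small sketch, reusing the `llm` defined earlier:

```python
from langchain.globals import set_llm_cache

# Passing None removes the global cache, so subsequent calls go back to the provider.
set_llm_cache(None)
llm.invoke("Tell me a joke")  # no longer served from a cache
```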
https://python.langchain.com/v0.2/docs/how_to/chat_models_universal_init/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to init any model in one line On this page How to init any model in one line ================================= Many LLM applications let end users specify what model provider and model they want the application to be powered by. This requires writing some logic to initialize different ChatModels based on some user configuration. The `init_chat_model()` helper method makes it easy to initialize a number of different model integrations without having to worry about import paths and class names. Supported models See the [init\_chat\_model()](https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.init_chat_model.html) API reference for a full list of supported integrations. Make sure you have the integration packages installed for any model providers you want to support. E.g. you should have `langchain-openai` installed to init an OpenAI model. %pip install -qU langchain langchain-openai langchain-anthropic langchain-google-vertexai Basic usage[​](#basic-usage "Direct link to Basic usage") --------------------------------------------------------- from langchain.chat_models import init_chat_model# Returns a langchain_openai.ChatOpenAI instance.gpt_4o = init_chat_model("gpt-4o", model_provider="openai", temperature=0)# Returns a langchain_anthropic.ChatAnthropic instance.claude_opus = init_chat_model( "claude-3-opus-20240229", model_provider="anthropic", temperature=0)# Returns a langchain_google_vertexai.ChatVertexAI instance.gemini_15 = init_chat_model( "gemini-1.5-pro", model_provider="google_vertexai", temperature=0)# Since all model integrations implement the ChatModel interface, you can use them in the same way.print("GPT-4o: " + gpt_4o.invoke("what's your name").content + "\n")print("Claude Opus: " + claude_opus.invoke("what's your name").content + "\n")print("Gemini 1.5: " + gemini_15.invoke("what's your name").content + "\n") **API Reference:**[init\_chat\_model](https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.init_chat_model.html) GPT-4o: I'm an AI created by OpenAI, and I don't have a personal name. You can call me Assistant! How can I help you today?Claude Opus: My name is Claude. It's nice to meet you!Gemini 1.5: I am a large language model, trained by Google. I do not have a name. Simple config example[​](#simple-config-example "Direct link to Simple config example") --------------------------------------------------------------------------------------- user_config = { "model": "...user-specified...", "model_provider": "...user-specified...", "temperature": 0, "max_tokens": 1000,}llm = init_chat_model(**user_config)llm.invoke("what's your name") Inferring model provider[​](#inferring-model-provider "Direct link to Inferring model provider") ------------------------------------------------------------------------------------------------ For common and distinct model names `init_chat_model()` will attempt to infer the model provider. See the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.init_chat_model.html) for a full list of inference behavior. E.g. any model that starts with `gpt-3...` or `gpt-4...` will be inferred as using model provider `openai`. 
gpt_4o = init_chat_model("gpt-4o", temperature=0)claude_opus = init_chat_model("claude-3-opus-20240229", temperature=0)gemini_15 = init_chat_model("gemini-1.5-pro", temperature=0) [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/chat_models_universal_init.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to cache chat model responses ](/v0.2/docs/how_to/chat_model_caching/)[ Next How to track token usage in ChatModels ](/v0.2/docs/how_to/chat_token_usage_tracking/) * [Basic usage](#basic-usage) * [Simple config example](#simple-config-example) * [Inferring model provider](#inferring-model-provider)
https://python.langchain.com/v0.2/docs/how_to/document_loader_pdf/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to load PDFs On this page How to load PDFs ================ [Portable Document Format (PDF)](https://en.wikipedia.org/wiki/PDF), standardized as ISO 32000, is a file format developed by Adobe in 1992 to present documents, including text formatting and images, in a manner independent of application software, hardware, and operating systems. This guide covers how to load `PDF` documents into the LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) format that we use downstream. LangChain integrates with a host of PDF parsers. Some are simple and relatively low-level; others will support OCR and image-processing, or perform advanced document layout analysis. The right choice will depend on your application. Below we enumerate the possibilities. Using PyPDF[​](#using-pypdf "Direct link to Using PyPDF") --------------------------------------------------------- Here we load a PDF using `pypdf` into array of documents, where each document contains the page content and metadata with `page` number. %pip install pypdf from langchain_community.document_loaders import PyPDFLoaderfile_path = ( "../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf")loader = PyPDFLoader(file_path)pages = loader.load_and_split()pages[0] **API Reference:**[PyPDFLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.PyPDFLoader.html) Document(page_content='LayoutParser : A Unified Toolkit for Deep\nLearning Based Document Image Analysis\nZejiang Shen1( \x00), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain\nLee4, Jacob Carlson3, and Weining Li5\n1Allen Institute for AI\[email protected]\n2Brown University\nruochen [email protected]\n3Harvard University\n{melissadell,jacob carlson }@fas.harvard.edu\n4University of Washington\[email protected]\n5University of Waterloo\[email protected]\nAbstract. Recent advances in document image analysis (DIA) have been\nprimarily driven by the application of neural networks. Ideally, research\noutcomes could be easily deployed in production and extended for further\ninvestigation. However, various factors like loosely organized codebases\nand sophisticated model configurations complicate the easy reuse of im-\nportant innovations by a wide audience. Though there have been on-going\nefforts to improve reusability and simplify deep learning (DL) model\ndevelopment in disciplines like natural language processing and computer\nvision, none of them are optimized for challenges in the domain of DIA.\nThis represents a major gap in the existing toolkit, as DIA is central to\nacademic research across a wide range of disciplines in the social sciences\nand humanities. This paper introduces LayoutParser , an open-source\nlibrary for streamlining the usage of DL in DIA research and applica-\ntions. The core LayoutParser library comes with a set of simple and\nintuitive interfaces for applying and customizing DL models for layout de-\ntection, character recognition, and many other document processing tasks.\nTo promote extensibility, LayoutParser also incorporates a community\nplatform for sharing both pre-trained models and full document digiti-\nzation pipelines. 
We demonstrate that LayoutParser is helpful for both\nlightweight and large-scale digitization pipelines in real-word use cases.\nThe library is publicly available at https://layout-parser.github.io .\nKeywords: Document Image Analysis ·Deep Learning ·Layout Analysis\n·Character Recognition ·Open Source library ·Toolkit.\n1 Introduction\nDeep Learning(DL)-based approaches are the state-of-the-art for a wide range of\ndocument image analysis (DIA) tasks including document image classification [ 11,arXiv:2103.15348v2 [cs.CV] 21 Jun 2021', metadata={'source': '../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf', 'page': 0}) An advantage of this approach is that documents can be retrieved with page numbers. ### Vector search over PDFs[​](#vector-search-over-pdfs "Direct link to Vector search over PDFs") Once we have loaded PDFs into LangChain `Document` objects, we can index them (e.g., a RAG application) in the usual way: %pip install faiss-cpu # use `pip install faiss-gpu` for CUDA GPU support import getpassimport osos.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") from langchain_community.vectorstores import FAISSfrom langchain_openai import OpenAIEmbeddingsfaiss_index = FAISS.from_documents(pages, OpenAIEmbeddings())docs = faiss_index.similarity_search("What is LayoutParser?", k=2)for doc in docs: print(str(doc.metadata["page"]) + ":", doc.page_content[:300]) **API Reference:**[FAISS](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) 13: 14 Z. Shen et al.6 ConclusionLayoutParser provides a comprehensive toolkit for deep learning-based documentimage analysis. The off-the-shelf library is easy to install, and can be used tobuild flexible and accurate pipelines for processing documents with complicatedstructures. It also supports hi0: LayoutParser : A Unified Toolkit for DeepLearning Based Document Image AnalysisZejiang Shen1( ), Ruochen Zhang2, Melissa Dell3, Benjamin Charles GermainLee4, Jacob Carlson3, and Weining Li51Allen Institute for [email protected] Universityruochen [email protected] University ### Extract text from images[​](#extract-text-from-images "Direct link to Extract text from images") Some PDFs contain images of text-- e.g., within scanned documents, or figures. Using the `rapidocr-onnxruntime` package we can extract images as text as well: %pip install rapidocr-onnxruntime loader = PyPDFLoader("https://arxiv.org/pdf/2103.15348.pdf", extract_images=True)pages = loader.load()pages[4].page_content 'LayoutParser : A Unified Toolkit for DL-Based DIA 5\nTable 1: Current layout detection models in the LayoutParser model zoo\nDataset Base Model1Large Model Notes\nPubLayNet [38] F / M M Layouts of modern scientific documents\nPRImA [3] M - Layouts of scanned modern magazines and scientific reports\nNewspaper [17] F - Layouts of scanned US newspapers from the 20th century\nTableBank [18] F F Table region on modern scientific and business document\nHJDataset [31] F / M - Layouts of history Japanese documents\n1For each dataset, we train several models of different sizes for different needs (the trade-off between accuracy\nvs. computational cost). For “base model” and “large model”, we refer to using the ResNet 50 or ResNet 101\nbackbones [ 13], respectively. 
One can train models of different architectures, like Faster R-CNN [ 28] (F) and Mask\nR-CNN [ 12] (M). For example, an F in the Large Model column indicates it has a Faster R-CNN model trained\nusing the ResNet 101 backbone. The platform is maintained and a number of additions will be made to the model\nzoo in coming months.\nlayout data structures , which are optimized for efficiency and versatility. 3) When\nnecessary, users can employ existing or customized OCR models via the unified\nAPI provided in the OCR module . 4)LayoutParser comes with a set of utility\nfunctions for the visualization and storage of the layout data. 5) LayoutParser\nis also highly customizable, via its integration with functions for layout data\nannotation and model training . We now provide detailed descriptions for each\ncomponent.\n3.1 Layout Detection Models\nInLayoutParser , a layout model takes a document image as an input and\ngenerates a list of rectangular boxes for the target content regions. Different\nfrom traditional methods, it relies on deep convolutional neural networks rather\nthan manually curated rules to identify content regions. It is formulated as an\nobject detection problem and state-of-the-art models like Faster R-CNN [ 28] and\nMask R-CNN [ 12] are used. This yields prediction results of high accuracy and\nmakes it possible to build a concise, generalized interface for layout detection.\nLayoutParser , built upon Detectron2 [ 35], provides a minimal API that can\nperform layout detection with only four lines of code in Python:\n1import layoutparser as lp\n2image = cv2. imread (" image_file ") # load images\n3model = lp. Detectron2LayoutModel (\n4 "lp :// PubLayNet / faster_rcnn_R_50_FPN_3x / config ")\n5layout = model . detect ( image )\nLayoutParser provides a wealth of pre-trained model weights using various\ndatasets covering different languages, time periods, and document types. Due to\ndomain shift [ 7], the prediction performance can notably drop when models are ap-\nplied to target samples that are significantly different from the training dataset. As\ndocument structures and layouts vary greatly in different domains, it is important\nto select models trained on a dataset similar to the test samples. A semantic syntax\nis used for initializing the model weights in LayoutParser , using both the dataset\nname and model name lp://<dataset-name>/<model-architecture-name> .' Using PyMuPDF[​](#using-pymupdf "Direct link to Using PyMuPDF") --------------------------------------------------------------- This is the fastest of the PDF parsing options, and contains detailed metadata about the PDF and its pages, as well as returns one document per page. from langchain_community.document_loaders import PyMuPDFLoaderloader = PyMuPDFLoader("example_data/layout-parser-paper.pdf")data = loader.load()data[0] **API Reference:**[PyMuPDFLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.PyMuPDFLoader.html) Additionally, you can pass along any of the options from the [PyMuPDF documentation](https://pymupdf.readthedocs.io/en/latest/app1.html#plain-text/) as keyword arguments in the `load` call, and it will be pass along to the `get_text()` call. 
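As an illustration of that pass-through behaviour (assuming your installed version forwards `load` keyword arguments as described above), PyMuPDF's `sort` option, which re-orders text blocks into natural reading order, could be passed like this:

```python
from langchain_community.document_loaders import PyMuPDFLoader

loader = PyMuPDFLoader("example_data/layout-parser-paper.pdf")
# `sort=True` is a PyMuPDF get_text() option; it is forwarded by the loader,
# not an argument of the loader itself.
data = loader.load(sort=True)
```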
Using MathPix[​](#using-mathpix "Direct link to Using MathPix") --------------------------------------------------------------- Inspired by Daniel Gross's [https://gist.github.com/danielgross/3ab4104e14faccc12b49200843adab21](https://gist.github.com/danielgross/3ab4104e14faccc12b49200843adab21) from langchain_community.document_loaders import MathpixPDFLoaderfile_path = ( "../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf")loader = MathpixPDFLoader(file_path)data = loader.load() **API Reference:**[MathpixPDFLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.MathpixPDFLoader.html) Using Unstructured[​](#using-unstructured "Direct link to Using Unstructured") ------------------------------------------------------------------------------ [Unstructured](https://unstructured-io.github.io/unstructured/) supports a common interface for working with unstructured or semi-structured file formats, such as Markdown or PDF. LangChain's [UnstructuredPDFLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.UnstructuredPDFLoader.html) integrates with Unstructured to parse PDF documents into LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) objects. from langchain_community.document_loaders import UnstructuredPDFLoaderfile_path = ( "../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf")loader = UnstructuredPDFLoader(file_path)data = loader.load() **API Reference:**[UnstructuredPDFLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.UnstructuredPDFLoader.html) ### Retain Elements[​](#retain-elements "Direct link to Retain Elements") Under the hood, Unstructured creates different "elements" for different chunks of text. By default we combine those together, but you can easily keep that separation by specifying `mode="elements"`. file_path = ( "../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf")loader = UnstructuredPDFLoader(file_path, mode="elements")data = loader.load()data[0] Document(page_content='1 2 0 2', metadata={'source': '../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 213.36), (16.34, 253.36), (36.34, 253.36), (36.34, 213.36)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': '../../../docs/integrations/document_loaders/example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-03-18T13:22:22', 'page_number': 1, 'filetype': 'application/pdf', 'category': 'UncategorizedText'}) See the full set of element types for this particular document: set(doc.metadata["category"] for doc in data) {'ListItem', 'NarrativeText', 'Title', 'UncategorizedText'} ### Fetching remote PDFs using Unstructured[​](#fetching-remote-pdfs-using-unstructured "Direct link to Fetching remote PDFs using Unstructured") This covers how to load online PDFs into a document format that we can use downstream. 
This can be used for various online PDF sites such as [https://open.umn.edu/opentextbooks/textbooks/](https://open.umn.edu/opentextbooks/textbooks/) and [https://arxiv.org/archive/](https://arxiv.org/archive/) Note: all other PDF loaders can also be used to fetch remote PDFs, but `OnlinePDFLoader` is a legacy function, and works specifically with `UnstructuredPDFLoader`. from langchain_community.document_loaders import OnlinePDFLoaderloader = OnlinePDFLoader("https://arxiv.org/pdf/2302.03803.pdf")data = loader.load() **API Reference:**[OnlinePDFLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.OnlinePDFLoader.html) Using PyPDFium2[​](#using-pypdfium2 "Direct link to Using PyPDFium2") --------------------------------------------------------------------- from langchain_community.document_loaders import PyPDFium2Loaderfile_path = ( "../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf")loader = PyPDFium2Loader(file_path)data = loader.load() **API Reference:**[PyPDFium2Loader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.PyPDFium2Loader.html) Using PDFMiner[​](#using-pdfminer "Direct link to Using PDFMiner") ------------------------------------------------------------------ from langchain_community.document_loaders import PDFMinerLoaderfile_path = ( "../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf")loader = PDFMinerLoader(file_path)data = loader.load() **API Reference:**[PDFMinerLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.PDFMinerLoader.html) ### Using PDFMiner to generate HTML text[​](#using-pdfminer-to-generate-html-text "Direct link to Using PDFMiner to generate HTML text") This can be helpful for chunking texts semantically into sections as the output html content can be parsed via `BeautifulSoup` to get more structured and rich information about font size, page numbers, PDF headers/footers, etc. from langchain_community.document_loaders import PDFMinerPDFasHTMLLoaderfile_path = ( "../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf")loader = PDFMinerPDFasHTMLLoader(file_path)data = loader.load()[0] **API Reference:**[PDFMinerPDFasHTMLLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.PDFMinerPDFasHTMLLoader.html) from bs4 import BeautifulSoupsoup = BeautifulSoup(data.page_content, "html.parser")content = soup.find_all("div") import recur_fs = Nonecur_text = ""snippets = [] # first collect all snippets that have the same font sizefor c in content: sp = c.find("span") if not sp: continue st = sp.get("style") if not st: continue fs = re.findall("font-size:(\d+)px", st) if not fs: continue fs = int(fs[0]) if not cur_fs: cur_fs = fs if fs == cur_fs: cur_text += c.text else: snippets.append((cur_text, cur_fs)) cur_fs = fs cur_text = c.textsnippets.append((cur_text, cur_fs))# Note: The above logic is very straightforward. 
One can also add more strategies such as removing duplicate snippets (as# headers/footers in a PDF appear on multiple pages so if we find duplicates it's safe to assume that it is redundant info) from langchain_core.documents import Documentcur_idx = -1semantic_snippets = []# Assumption: headings have higher font size than their respective contentfor s in snippets: # if current snippet's font size > previous section's heading => it is a new heading if ( not semantic_snippets or s[1] > semantic_snippets[cur_idx].metadata["heading_font"] ): metadata = {"heading": s[0], "content_font": 0, "heading_font": s[1]} metadata.update(data.metadata) semantic_snippets.append(Document(page_content="", metadata=metadata)) cur_idx += 1 continue # if current snippet's font size <= previous section's content => content belongs to the same section (one can also create # a tree like structure for sub sections if needed but that may require some more thinking and may be data specific) if ( not semantic_snippets[cur_idx].metadata["content_font"] or s[1] <= semantic_snippets[cur_idx].metadata["content_font"] ): semantic_snippets[cur_idx].page_content += s[0] semantic_snippets[cur_idx].metadata["content_font"] = max( s[1], semantic_snippets[cur_idx].metadata["content_font"] ) continue # if current snippet's font size > previous section's content but less than previous section's heading than also make a new # section (e.g. title of a PDF will have the highest font size but we don't want it to subsume all sections) metadata = {"heading": s[0], "content_font": 0, "heading_font": s[1]} metadata.update(data.metadata) semantic_snippets.append(Document(page_content="", metadata=metadata)) cur_idx += 1 **API Reference:**[Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) semantic_snippets[4] Document(page_content='Recently, various DL models and datasets have been developed for layout analysis\ntasks. The dhSegment [22] utilizes fully convolutional networks [20] for segmen-\ntation tasks on historical documents. Object detection-based methods like Faster\nR-CNN [28] and Mask R-CNN [12] are used for identifying document elements [38]\nand detecting tables [30, 26]. Most recently, Graph Neural Networks [29] have also\nbeen used in table detection [27]. However, these models are usually implemented\nindividually and there is no unified framework to load and use such models.\nThere has been a surge of interest in creating open-source tools for document\nimage processing: a search of document image analysis in Github leads to 5M\nrelevant code pieces 6; yet most of them rely on traditional rule-based methods\nor provide limited functionalities. The closest prior research to our work is the\nOCR-D project7, which also tries to build a complete toolkit for DIA. However,\nsimilar to the platform developed by Neudecker et al. [21], it is designed for\nanalyzing historical documents, and provides no supports for recent DL models.\nThe DocumentLayoutAnalysis project8 focuses on processing born-digital PDF\ndocuments via analyzing the stored PDF data. Repositories like DeepLayout9\nand Detectron2-PubLayNet10 are individual deep learning models trained on\nlayout analysis datasets without support for the full DIA pipeline. The Document\nAnalysis and Exploitation (DAE) platform [15] and the DeepDIVA project [2]\naim to improve the reproducibility of DIA methods (or DL models), yet they\nare not actively maintained. 
OCR engines like Tesseract [14], easyOCR11 and\npaddleOCR12 usually do not come with comprehensive functionalities for other\nDIA tasks like layout analysis.\nRecent years have also seen numerous efforts to create libraries for promoting\nreproducibility and reusability in the field of DL. Libraries like Dectectron2 [35],\n6 The number shown is obtained by specifying the search type as ‘code’.\n7 https://ocr-d.de/en/about\n8 https://github.com/BobLd/DocumentLayoutAnalysis\n9 https://github.com/leonlulu/DeepLayout\n10 https://github.com/hpanwar08/detectron2\n11 https://github.com/JaidedAI/EasyOCR\n12 https://github.com/PaddlePaddle/PaddleOCR\n4\nZ. Shen et al.\nFig. 1: The overall architecture of LayoutParser. For an input document image,\nthe core LayoutParser library provides a set of off-the-shelf tools for layout\ndetection, OCR, visualization, and storage, backed by a carefully designed layout\ndata structure. LayoutParser also supports high level customization via efficient\nlayout annotation and model training functions. These improve model accuracy\non the target samples. The community platform enables the easy sharing of DIA\nmodels and whole digitization pipelines to promote reusability and reproducibility.\nA collection of detailed documentation, tutorials and exemplar projects make\nLayoutParser easy to learn and use.\nAllenNLP [8] and transformers [34] have provided the community with complete\nDL-based support for developing and deploying models for general computer\nvision and natural language processing problems. LayoutParser, on the other\nhand, specializes specifically in DIA tasks. LayoutParser is also equipped with a\ncommunity platform inspired by established model hubs such as Torch Hub [23]\nand TensorFlow Hub [1]. It enables the sharing of pretrained models as well as\nfull document processing pipelines that are unique to DIA tasks.\nThere have been a variety of document data collections to facilitate the\ndevelopment of DL models. Some examples include PRImA [3](magazine layouts),\nPubLayNet [38](academic paper layouts), Table Bank [18](tables in academic\npapers), Newspaper Navigator Dataset [16, 17](newspaper figure layouts) and\nHJDataset [31](historical Japanese document layouts). A spectrum of models\ntrained on these datasets are currently available in the LayoutParser model zoo\nto support different use cases.\n', metadata={'heading': '2 Related Work\n', 'content_font': 9, 'heading_font': 11, 'source': '../../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf'}) PyPDF Directory[​](#pypdf-directory "Direct link to PyPDF Directory") --------------------------------------------------------------------- Load PDFs from directory from langchain_community.document_loaders import PyPDFDirectoryLoader **API Reference:**[PyPDFDirectoryLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.PyPDFDirectoryLoader.html) directory_path = "../../../docs/integrations/document_loaders/example_data/"loader = PyPDFDirectoryLoader("example_data/")docs = loader.load() Using PDFPlumber[​](#using-pdfplumber "Direct link to Using PDFPlumber") ------------------------------------------------------------------------ Like PyMuPDF, the output Documents contain detailed metadata about the PDF and its pages, and returns one document per page. 
from langchain_community.document_loaders import PDFPlumberLoader

loader = PDFPlumberLoader("example_data/layout-parser-paper.pdf")
data = loader.load()
data[0]

**API Reference:**[PDFPlumberLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.PDFPlumberLoader.html)

Using AmazonTextractPDFParser[​](#using-amazontextractpdfparser "Direct link to Using AmazonTextractPDFParser")
---------------------------------------------------------------------------------------------------------------

The AmazonTextractPDFLoader calls the [Amazon Textract Service](https://aws.amazon.com/textract/) to convert PDFs into a Document structure. The loader does pure OCR at the moment, with more features like layout support planned, depending on demand. Single- and multi-page documents are supported, up to 3000 pages and 512 MB in size. A successful call requires an AWS account, configured similarly to the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html) requirements. Aside from the AWS configuration, it works much like the other PDF loaders, and it also supports JPEG, PNG, TIFF, and non-native PDF formats.

from langchain_community.document_loaders import AmazonTextractPDFLoader

loader = AmazonTextractPDFLoader("example_data/alejandro_rosalez_sample-small.jpeg")
documents = loader.load()

**API Reference:**[AmazonTextractPDFLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.AmazonTextractPDFLoader.html)

Using AzureAIDocumentIntelligenceLoader[​](#using-azureaidocumentintelligenceloader "Direct link to Using AzureAIDocumentIntelligenceLoader")
---------------------------------------------------------------------------------------------------------------------------------------------

[Azure AI Document Intelligence](https://aka.ms/doc-intelligence) (formerly known as `Azure Form Recognizer`) is a machine-learning-based service that extracts text (including handwriting), tables, document structures (e.g., titles, section headings) and key-value pairs from digital or scanned PDFs, images, Office and HTML files. Document Intelligence supports `PDF`, `JPEG/JPG`, `PNG`, `BMP`, `TIFF`, `HEIF`, `DOCX`, `XLSX`, `PPTX` and `HTML`. This [current implementation](https://aka.ms/di-langchain) of a loader using `Document Intelligence` can incorporate content page-wise and turn it into LangChain documents. The default output format is markdown, which can easily be chained with `MarkdownHeaderTextSplitter` for semantic document chunking. You can also use `mode="single"` or `mode="page"` to return plain text as a single document or split by page.

### Prerequisite[​](#prerequisite "Direct link to Prerequisite")

An Azure AI Document Intelligence resource in one of the 3 preview regions: **East US**, **West US2**, **West Europe** - follow [this document](https://learn.microsoft.com/azure/ai-services/document-intelligence/create-document-intelligence-resource?view=doc-intel-4.0.0) to create one if you don't have one already. You will be passing `<endpoint>` and `<key>` as parameters to the loader.
%pip install --upgrade --quiet langchain langchain-community azure-ai-documentintelligence

from langchain_community.document_loaders import AzureAIDocumentIntelligenceLoader

file_path = "<filepath>"
endpoint = "<endpoint>"
key = "<key>"
loader = AzureAIDocumentIntelligenceLoader(
    api_endpoint=endpoint, api_key=key, file_path=file_path, api_model="prebuilt-layout"
)
documents = loader.load()

**API Reference:**[AzureAIDocumentIntelligenceLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.doc_intelligence.AzureAIDocumentIntelligenceLoader.html)
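To get the page-wise output mentioned above instead of a single markdown document, the same loader can be constructed with `mode="page"`. This is a sketch reusing the placeholder endpoint, key, and file path values:

```python
from langchain_community.document_loaders import AzureAIDocumentIntelligenceLoader

endpoint = "<endpoint>"
key = "<key>"
loader = AzureAIDocumentIntelligenceLoader(
    api_endpoint=endpoint,
    api_key=key,
    file_path="<filepath>",
    api_model="prebuilt-layout",
    mode="page",  # one Document per page with plain-text content
)
documents = loader.load()
```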
https://python.langchain.com/v0.2/docs/how_to/document_loader_office_file/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to load Microsoft Office files On this page How to load Microsoft Office files ================================== The [Microsoft Office](https://www.office.com/) suite of productivity software includes Microsoft Word, Microsoft Excel, Microsoft PowerPoint, Microsoft Outlook, and Microsoft OneNote. It is available for Microsoft Windows and macOS operating systems. It is also available on Android and iOS. This covers how to load commonly used file formats including `DOCX`, `XLSX` and `PPTX` documents into a LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) object that we can use downstream. Loading DOCX, XLSX, PPTX with AzureAIDocumentIntelligenceLoader[​](#loading-docx-xlsx-pptx-with-azureaidocumentintelligenceloader "Direct link to Loading DOCX, XLSX, PPTX with AzureAIDocumentIntelligenceLoader") ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- [Azure AI Document Intelligence](https://aka.ms/doc-intelligence) (formerly known as `Azure Form Recognizer`) is machine-learning based service that extracts texts (including handwriting), tables, document structures (e.g., titles, section headings, etc.) and key-value-pairs from digital or scanned PDFs, images, Office and HTML files. Document Intelligence supports `PDF`, `JPEG/JPG`, `PNG`, `BMP`, `TIFF`, `HEIF`, `DOCX`, `XLSX`, `PPTX` and `HTML`. This [current implementation](https://aka.ms/di-langchain) of a loader using `Document Intelligence` can incorporate content page-wise and turn it into LangChain documents. The default output format is markdown, which can be easily chained with `MarkdownHeaderTextSplitter` for semantic document chunking. You can also use `mode="single"` or `mode="page"` to return pure texts in a single page or document split by page. ### Prerequisite[​](#prerequisite "Direct link to Prerequisite") An Azure AI Document Intelligence resource in one of the 3 preview regions: **East US**, **West US2**, **West Europe** - follow [this document](https://learn.microsoft.com/azure/ai-services/document-intelligence/create-document-intelligence-resource?view=doc-intel-4.0.0) to create one if you don't have. You will be passing `<endpoint>` and `<key>` as parameters to the loader. %pip install --upgrade --quiet langchain langchain-community azure-ai-documentintelligencefrom langchain_community.document_loaders import AzureAIDocumentIntelligenceLoaderfile_path = "<filepath>"endpoint = "<endpoint>"key = "<key>"loader = AzureAIDocumentIntelligenceLoader( api_endpoint=endpoint, api_key=key, file_path=file_path, api_model="prebuilt-layout")documents = loader.load() **API Reference:**[AzureAIDocumentIntelligenceLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.doc_intelligence.AzureAIDocumentIntelligenceLoader.html) [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/document_loader_office_file.mdx) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). 
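The chaining with `MarkdownHeaderTextSplitter` mentioned above might look like the following sketch, assuming `documents` holds the loader's default markdown output and that the header levels listed actually occur in your file:

```python
from langchain_text_splitters import MarkdownHeaderTextSplitter

headers_to_split_on = [
    ("#", "Header 1"),
    ("##", "Header 2"),
]
splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
# `documents[0].page_content` is the markdown produced by AzureAIDocumentIntelligenceLoader above
chunks = splitter.split_text(documents[0].page_content)
```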
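For readability, here is the loader snippet above reassembled as a standalone script. `<endpoint>`, `<key>`, and `<filepath>` are placeholders for your own Azure AI Document Intelligence resource and file, and the `MarkdownHeaderTextSplitter` step at the end is only a sketch of the semantic-chunking idea mentioned above (the header list is illustrative):

```python
# Reassembled version of the snippet above. <endpoint>, <key> and <filepath>
# are placeholders for your own resource and file.
from langchain_community.document_loaders import AzureAIDocumentIntelligenceLoader
from langchain_text_splitters import MarkdownHeaderTextSplitter

loader = AzureAIDocumentIntelligenceLoader(
    api_endpoint="<endpoint>",
    api_key="<key>",
    file_path="<filepath>",        # e.g. a local .docx, .xlsx or .pptx file
    api_model="prebuilt-layout",   # default output mode is markdown
)
documents = loader.load()

# Because the default output is markdown, it can be chunked semantically.
# The header levels below are illustrative; adjust them to your documents.
splitter = MarkdownHeaderTextSplitter(
    headers_to_split_on=[("#", "Header 1"), ("##", "Header 2")]
)
chunks = splitter.split_text(documents[0].page_content)
```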
https://python.langchain.com/v0.2/docs/how_to/dynamic_chain/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to create a dynamic (self-constructing) chain How to create a dynamic (self-constructing) chain ================================================= Prerequisites This guide assumes familiarity with the following: * [LangChain Expression Language (LCEL)](/v0.2/docs/concepts/#langchain-expression-language) * [How to turn any function into a runnable](/v0.2/docs/how_to/functions/) Sometimes we want to construct parts of a chain at runtime, depending on the chain inputs ([routing](/v0.2/docs/how_to/routing/) is the most common example of this). We can create dynamic chains like this using a very useful property of RunnableLambda's, which is that if a RunnableLambda returns a Runnable, that Runnable is itself invoked. Let's see an example. * OpenAI * Anthropic * Azure * Google * Cohere * FireworksAI * Groq * MistralAI * TogetherAI pip install -qU langchain-openai import getpassimport osos.environ["OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI(model="gpt-3.5-turbo-0125") pip install -qU langchain-anthropic import getpassimport osos.environ["ANTHROPIC_API_KEY"] = getpass.getpass()from langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model="claude-3-sonnet-20240229") pip install -qU langchain-openai import getpassimport osos.environ["AZURE_OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import AzureChatOpenAIllm = AzureChatOpenAI( azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"], openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],) pip install -qU langchain-google-vertexai import getpassimport osos.environ["GOOGLE_API_KEY"] = getpass.getpass()from langchain_google_vertexai import ChatVertexAIllm = ChatVertexAI(model="gemini-pro") pip install -qU langchain-cohere import getpassimport osos.environ["COHERE_API_KEY"] = getpass.getpass()from langchain_cohere import ChatCoherellm = ChatCohere(model="command-r") pip install -qU langchain-fireworks import getpassimport osos.environ["FIREWORKS_API_KEY"] = getpass.getpass()from langchain_fireworks import ChatFireworksllm = ChatFireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct") pip install -qU langchain-groq import getpassimport osos.environ["GROQ_API_KEY"] = getpass.getpass()from langchain_groq import ChatGroqllm = ChatGroq(model="llama3-8b-8192") pip install -qU langchain-mistralai import getpassimport osos.environ["MISTRAL_API_KEY"] = getpass.getpass()from langchain_mistralai import ChatMistralAIllm = ChatMistralAI(model="mistral-large-latest") pip install -qU langchain-openai import getpassimport osos.environ["TOGETHER_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI( base_url="https://api.together.xyz/v1", api_key=os.environ["TOGETHER_API_KEY"], model="mistralai/Mixtral-8x7B-Instruct-v0.1",) # | echo: falsefrom langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model="claude-3-sonnet-20240229") **API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) from langchain_core.output_parsers import StrOutputParserfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.runnables import Runnable, RunnablePassthrough, chaincontextualize_instructions = """Convert the latest user question into a standalone question given the chat history. 
Don't answer the question, return the question and nothing else (no descriptive text)."""contextualize_prompt = ChatPromptTemplate.from_messages( [ ("system", contextualize_instructions), ("placeholder", "{chat_history}"), ("human", "{question}"), ])contextualize_question = contextualize_prompt | llm | StrOutputParser()qa_instructions = ( """Answer the user question given the following context:\n\n{context}.""")qa_prompt = ChatPromptTemplate.from_messages( [("system", qa_instructions), ("human", "{question}")])@chaindef contextualize_if_needed(input_: dict) -> Runnable: if input_.get("chat_history"): # NOTE: This is returning another Runnable, not an actual output. return contextualize_question else: return RunnablePassthrough()@chaindef fake_retriever(input_: dict) -> str: return "egypt's population in 2024 is about 111 million"full_chain = ( RunnablePassthrough.assign(question=contextualize_if_needed).assign( context=fake_retriever ) | qa_prompt | llm | StrOutputParser())full_chain.invoke( { "question": "what about egypt", "chat_history": [ ("human", "what's the population of indonesia"), ("ai", "about 276 million"), ], }) **API Reference:**[StrOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.string.StrOutputParser.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [Runnable](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) | [chain](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.chain.html) "According to the context provided, Egypt's population in 2024 is estimated to be about 111 million." The key here is that `contextualize_if_needed` returns another Runnable and not an actual output. This returned Runnable is itself run when the full chain is executed. Looking at the trace we can see that, since we passed in chat\_history, we executed the contextualize\_question chain as part of the full chain: [https://smith.langchain.com/public/9e0ae34c-4082-4f3f-beed-34a2a2f4c991/r](https://smith.langchain.com/public/9e0ae34c-4082-4f3f-beed-34a2a2f4c991/r) Note that the streaming, batching, etc. capabilities of the returned Runnable are all preserved for chunk in contextualize_if_needed.stream( { "question": "what about egypt", "chat_history": [ ("human", "what's the population of indonesia"), ("ai", "about 276 million"), ], }): print(chunk) What is the population of Egypt? [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/dynamic_chain.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to load PDFs ](/v0.2/docs/how_to/document_loader_pdf/)[ Next Text embedding models ](/v0.2/docs/how_to/embed_text/)
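For reference, the heart of the example above is the `@chain`-decorated function that returns a Runnable rather than a value. Reformatted for readability, and assuming `llm` is any chat model initialized as in the tabs above, it looks like this:

```python
# Assumes `llm` is a chat model initialized as in the tabs above.
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough, chain

contextualize_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "Convert the latest user question into a standalone question "
            "given the chat history. Return only the question.",
        ),
        ("placeholder", "{chat_history}"),
        ("human", "{question}"),
    ]
)
contextualize_question = contextualize_prompt | llm | StrOutputParser()


@chain
def contextualize_if_needed(input_: dict) -> Runnable:
    # Returning a Runnable (not a final value) is what makes the chain
    # self-constructing: the returned Runnable is invoked as part of the run.
    if input_.get("chat_history"):
        return contextualize_question
    return RunnablePassthrough()
```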
https://python.langchain.com/v0.2/docs/how_to/embed_text/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * Text embedding models On this page Text embedding models ===================== info Head to [Integrations](/v0.2/docs/integrations/text_embedding/) for documentation on built-in integrations with text embedding model providers. The Embeddings class is a class designed for interfacing with text embedding models. There are lots of embedding model providers (OpenAI, Cohere, Hugging Face, etc) - this class is designed to provide a standard interface for all of them. Embeddings create a vector representation of a piece of text. This is useful because it means we can think about text in the vector space, and do things like semantic search where we look for pieces of text that are most similar in the vector space. The base Embeddings class in LangChain provides two methods: one for embedding documents and one for embedding a query. The former, `.embed_documents`, takes as input multiple texts, while the latter, `.embed_query`, takes a single text. The reason for having these as two separate methods is that some embedding providers have different embedding methods for documents (to be searched over) vs queries (the search query itself). `.embed_query` will return a list of floats, whereas `.embed_documents` returns a list of lists of floats. Get started[​](#get-started "Direct link to Get started") --------------------------------------------------------- ### Setup[​](#setup "Direct link to Setup") * OpenAI * Cohere * Hugging Face To start we'll need to install the OpenAI partner package: pip install langchain-openai Accessing the API requires an API key, which you can get by creating an account and heading [here](https://platform.openai.com/account/api-keys). Once we have a key we'll want to set it as an environment variable by running: export OPENAI_API_KEY="..." If you'd prefer not to set an environment variable you can pass the key in directly via the `api_key` named parameter when initiating the OpenAI LLM class: from langchain_openai import OpenAIEmbeddingsembeddings_model = OpenAIEmbeddings(api_key="...") **API Reference:**[OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) Otherwise you can initialize without any params: from langchain_openai import OpenAIEmbeddingsembeddings_model = OpenAIEmbeddings() **API Reference:**[OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) To start we'll need to install the Cohere SDK package: pip install langchain-cohere Accessing the API requires an API key, which you can get by creating an account and heading [here](https://dashboard.cohere.com/api-keys). Once we have a key we'll want to set it as an environment variable by running: export COHERE_API_KEY="..." 
If you'd prefer not to set an environment variable you can pass the key in directly via the `cohere_api_key` named parameter when initiating the Cohere LLM class: from langchain_cohere import CohereEmbeddingsembeddings_model = CohereEmbeddings(cohere_api_key="...") **API Reference:**[CohereEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_cohere.embeddings.CohereEmbeddings.html) Otherwise you can initialize without any params: from langchain_cohere import CohereEmbeddingsembeddings_model = CohereEmbeddings() **API Reference:**[CohereEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_cohere.embeddings.CohereEmbeddings.html) To start we'll need to install the Hugging Face partner package: pip install langchain-huggingface You can then load any [Sentence Transformers model](https://huggingface.co/models?library=sentence-transformers) from the Hugging Face Hub. from langchain_huggingface import HuggingFaceEmbeddingsembeddings_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2") **API Reference:**[HuggingFaceEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_huggingface.embeddings.huggingface.HuggingFaceEmbeddings.html) You can also leave the `model_name` blank to use the default [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) model. from langchain_huggingface import HuggingFaceEmbeddingsembeddings_model = HuggingFaceEmbeddings() **API Reference:**[HuggingFaceEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_huggingface.embeddings.huggingface.HuggingFaceEmbeddings.html) ### `embed_documents`[​](#embed_documents "Direct link to embed_documents") #### Embed list of texts[​](#embed-list-of-texts "Direct link to Embed list of texts") Use `.embed_documents` to embed a list of strings, recovering a list of embeddings: embeddings = embeddings_model.embed_documents( [ "Hi there!", "Oh, hello!", "What's your name?", "My friends call me World", "Hello World!" ])len(embeddings), len(embeddings[0]) (5, 1536) ### `embed_query`[​](#embed_query "Direct link to embed_query") #### Embed single query[​](#embed-single-query "Direct link to Embed single query") Use `.embed_query` to embed a single piece of text (e.g., for the purpose of comparing to other embedded pieces of texts). embedded_query = embeddings_model.embed_query("What was the name mentioned in the conversation?")embedded_query[:5] [0.0053587136790156364, -0.0004999046213924885, 0.038883671164512634, -0.003001077566295862, -0.00900818221271038] [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/embed_text.mdx) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to create a dynamic (self-constructing) chain ](/v0.2/docs/how_to/dynamic_chain/)[ Next How to combine results from multiple retrievers ](/v0.2/docs/how_to/ensemble_retriever/) * [Get started](#get-started) * [Setup](#setup) * [`embed_documents`](#embed_documents) * [`embed_query`](#embed_query)
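To make the difference between the two methods concrete, here is a minimal sketch that embeds a few documents and a query and ranks the documents by cosine similarity. It assumes `OPENAI_API_KEY` is set; the `cosine` helper is for illustration only:

```python
from langchain_openai import OpenAIEmbeddings

embeddings_model = OpenAIEmbeddings()

texts = ["Hi there!", "What's your name?", "My friends call me World"]
doc_vectors = embeddings_model.embed_documents(texts)  # one vector per text
query_vector = embeddings_model.embed_query("What was the name mentioned?")


def cosine(a, b):
    # Plain-Python cosine similarity, for illustration only.
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = sum(x * x for x in a) ** 0.5
    norm_b = sum(y * y for y in b) ** 0.5
    return dot / (norm_a * norm_b)


# Rank the documents by similarity to the query.
for text, vec in sorted(zip(texts, doc_vectors), key=lambda tv: -cosine(query_vector, tv[1])):
    print(f"{cosine(query_vector, vec):.3f}  {text}")
```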
https://python.langchain.com/v0.2/docs/how_to/ensemble_retriever/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to combine results from multiple retrievers On this page How to combine results from multiple retrievers =============================================== The [EnsembleRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.ensemble.EnsembleRetriever.html) supports ensembling of results from multiple retrievers. It is initialized with a list of [BaseRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain_core.retrievers.BaseRetriever.html) objects. EnsembleRetrievers rerank the results of the constituent retrievers based on the [Reciprocal Rank Fusion](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf) algorithm. By leveraging the strengths of different algorithms, the `EnsembleRetriever` can achieve better performance than any single algorithm. The most common pattern is to combine a sparse retriever (like BM25) with a dense retriever (like embedding similarity), because their strengths are complementary. It is also known as "hybrid search". The sparse retriever is good at finding relevant documents based on keywords, while the dense retriever is good at finding relevant documents based on semantic similarity. Basic usage[​](#basic-usage "Direct link to Basic usage") --------------------------------------------------------- Below we demonstrate ensembling of a [BM25Retriever](https://api.python.langchain.com/en/latest/retrievers/langchain_community.retrievers.bm25.BM25Retriever.html) with a retriever derived from the [FAISS vector store](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html). %pip install --upgrade --quiet rank_bm25 > /dev/null from langchain.retrievers import EnsembleRetrieverfrom langchain_community.retrievers import BM25Retrieverfrom langchain_community.vectorstores import FAISSfrom langchain_openai import OpenAIEmbeddingsdoc_list_1 = [ "I like apples", "I like oranges", "Apples and oranges are fruits",]# initialize the bm25 retriever and faiss retrieverbm25_retriever = BM25Retriever.from_texts( doc_list_1, metadatas=[{"source": 1}] * len(doc_list_1))bm25_retriever.k = 2doc_list_2 = [ "You like apples", "You like oranges",]embedding = OpenAIEmbeddings()faiss_vectorstore = FAISS.from_texts( doc_list_2, embedding, metadatas=[{"source": 2}] * len(doc_list_2))faiss_retriever = faiss_vectorstore.as_retriever(search_kwargs={"k": 2})# initialize the ensemble retrieverensemble_retriever = EnsembleRetriever( retrievers=[bm25_retriever, faiss_retriever], weights=[0.5, 0.5]) **API Reference:**[EnsembleRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.ensemble.EnsembleRetriever.html) | [BM25Retriever](https://api.python.langchain.com/en/latest/retrievers/langchain_community.retrievers.bm25.BM25Retriever.html) | [FAISS](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) docs = ensemble_retriever.invoke("apples")docs [Document(page_content='I like apples', metadata={'source': 1}), Document(page_content='You like apples', metadata={'source': 2}), Document(page_content='Apples and oranges are fruits', metadata={'source': 1}), Document(page_content='You like oranges', metadata={'source': 2})] Runtime Configuration[​](#runtime-configuration "Direct link to Runtime Configuration") 
--------------------------------------------------------------------------------------- We can also configure the individual retrievers at runtime using [configurable fields](/v0.2/docs/how_to/configure/). Below we update the "top-k" parameter for the FAISS retriever specifically: from langchain_core.runnables import ConfigurableFieldfaiss_retriever = faiss_vectorstore.as_retriever( search_kwargs={"k": 2}).configurable_fields( search_kwargs=ConfigurableField( id="search_kwargs_faiss", name="Search Kwargs", description="The search kwargs to use", ))ensemble_retriever = EnsembleRetriever( retrievers=[bm25_retriever, faiss_retriever], weights=[0.5, 0.5]) **API Reference:**[ConfigurableField](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.utils.ConfigurableField.html) config = {"configurable": {"search_kwargs_faiss": {"k": 1}}}docs = ensemble_retriever.invoke("apples", config=config)docs [Document(page_content='I like apples', metadata={'source': 1}), Document(page_content='You like apples', metadata={'source': 2}), Document(page_content='Apples and oranges are fruits', metadata={'source': 1})] Notice that this only returns one source from the FAISS retriever, because we pass in the relevant configuration at run time [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/ensemble_retriever.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous Text embedding models ](/v0.2/docs/how_to/embed_text/)[ Next How to select examples by length ](/v0.2/docs/how_to/example_selectors_length_based/) * [Basic usage](#basic-usage) * [Runtime Configuration](#runtime-configuration)
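For convenience, here is the basic hybrid-search example above as a single runnable script (it assumes `OPENAI_API_KEY` is set and that `rank_bm25` and `faiss-cpu` are installed):

```python
from langchain.retrievers import EnsembleRetriever
from langchain_community.retrievers import BM25Retriever
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

doc_list_1 = ["I like apples", "I like oranges", "Apples and oranges are fruits"]
doc_list_2 = ["You like apples", "You like oranges"]

# Sparse (keyword-based) retriever.
bm25_retriever = BM25Retriever.from_texts(
    doc_list_1, metadatas=[{"source": 1}] * len(doc_list_1)
)
bm25_retriever.k = 2

# Dense (embedding-based) retriever.
faiss_vectorstore = FAISS.from_texts(
    doc_list_2, OpenAIEmbeddings(), metadatas=[{"source": 2}] * len(doc_list_2)
)
faiss_retriever = faiss_vectorstore.as_retriever(search_kwargs={"k": 2})

# Fuse both result lists with Reciprocal Rank Fusion.
ensemble_retriever = EnsembleRetriever(
    retrievers=[bm25_retriever, faiss_retriever], weights=[0.5, 0.5]
)
print(ensemble_retriever.invoke("apples"))
```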
https://python.langchain.com/v0.2/docs/how_to/example_selectors_mmr/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to select examples by maximal marginal relevance (MMR) How to select examples by maximal marginal relevance (MMR) ========================================================== The `MaxMarginalRelevanceExampleSelector` selects examples based on a combination of which examples are most similar to the inputs, while also optimizing for diversity. It does this by finding the examples with the embeddings that have the greatest cosine similarity with the inputs, and then iteratively adding them while penalizing them for closeness to already selected examples. from langchain_community.vectorstores import FAISSfrom langchain_core.example_selectors import ( MaxMarginalRelevanceExampleSelector, SemanticSimilarityExampleSelector,)from langchain_core.prompts import FewShotPromptTemplate, PromptTemplatefrom langchain_openai import OpenAIEmbeddingsexample_prompt = PromptTemplate( input_variables=["input", "output"], template="Input: {input}\nOutput: {output}",)# Examples of a pretend task of creating antonyms.examples = [ {"input": "happy", "output": "sad"}, {"input": "tall", "output": "short"}, {"input": "energetic", "output": "lethargic"}, {"input": "sunny", "output": "gloomy"}, {"input": "windy", "output": "calm"},] **API Reference:**[FAISS](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html) | [MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html) | [SemanticSimilarityExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.SemanticSimilarityExampleSelector.html) | [FewShotPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.few_shot.FewShotPromptTemplate.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) example_selector = MaxMarginalRelevanceExampleSelector.from_examples( # The list of examples available to select from. examples, # The embedding class used to produce embeddings which are used to measure semantic similarity. OpenAIEmbeddings(), # The VectorStore class that is used to store the embeddings and do a similarity search over. FAISS, # The number of examples to produce. k=2,)mmr_prompt = FewShotPromptTemplate( # We provide an ExampleSelector instead of examples. example_selector=example_selector, example_prompt=example_prompt, prefix="Give the antonym of every input", suffix="Input: {adjective}\nOutput:", input_variables=["adjective"],) # Input is a feeling, so should select the happy/sad example as the first oneprint(mmr_prompt.format(adjective="worried")) Give the antonym of every inputInput: happyOutput: sadInput: windyOutput: calmInput: worriedOutput: # Let's compare this to what we would just get if we went solely off of similarity,# by using SemanticSimilarityExampleSelector instead of MaxMarginalRelevanceExampleSelector.example_selector = SemanticSimilarityExampleSelector.from_examples( # The list of examples available to select from. examples, # The embedding class used to produce embeddings which are used to measure semantic similarity. 
OpenAIEmbeddings(), # The VectorStore class that is used to store the embeddings and do a similarity search over. FAISS, # The number of examples to produce. k=2,)similar_prompt = FewShotPromptTemplate( # We provide an ExampleSelector instead of examples. example_selector=example_selector, example_prompt=example_prompt, prefix="Give the antonym of every input", suffix="Input: {adjective}\nOutput:", input_variables=["adjective"],)print(similar_prompt.format(adjective="worried")) Give the antonym of every inputInput: happyOutput: sadInput: sunnyOutput: gloomyInput: worriedOutput: [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/example_selectors_mmr.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to select examples by length ](/v0.2/docs/how_to/example_selectors_length_based/)[ Next How to select examples by n-gram overlap ](/v0.2/docs/how_to/example_selectors_ngram/)
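You can also call the selector directly to see which examples it would pick for a given input, which is handy when tuning `k`. This small sketch assumes the `example_selector` defined above:

```python
# Assumes the MaxMarginalRelevanceExampleSelector built above.
selected = example_selector.select_examples({"adjective": "worried"})
for example in selected:
    print(example["input"], "->", example["output"])
```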
https://python.langchain.com/v0.2/docs/how_to/example_selectors_length_based/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to select examples by length How to select examples by length ================================ This example selector selects which examples to use based on length. This is useful when you are worried about constructing a prompt that will go over the length of the context window. For longer inputs, it will select fewer examples to include, while for shorter inputs it will select more. from langchain_core.example_selectors import LengthBasedExampleSelectorfrom langchain_core.prompts import FewShotPromptTemplate, PromptTemplate# Examples of a pretend task of creating antonyms.examples = [ {"input": "happy", "output": "sad"}, {"input": "tall", "output": "short"}, {"input": "energetic", "output": "lethargic"}, {"input": "sunny", "output": "gloomy"}, {"input": "windy", "output": "calm"},]example_prompt = PromptTemplate( input_variables=["input", "output"], template="Input: {input}\nOutput: {output}",)example_selector = LengthBasedExampleSelector( # The examples it has available to choose from. examples=examples, # The PromptTemplate being used to format the examples. example_prompt=example_prompt, # The maximum length that the formatted examples should be. # Length is measured by the get_text_length function below. max_length=25, # The function used to get the length of a string, which is used # to determine which examples to include. It is commented out because # it is provided as a default value if none is specified. # get_text_length: Callable[[str], int] = lambda x: len(re.split("\n| ", x)))dynamic_prompt = FewShotPromptTemplate( # We provide an ExampleSelector instead of examples. example_selector=example_selector, example_prompt=example_prompt, prefix="Give the antonym of every input", suffix="Input: {adjective}\nOutput:", input_variables=["adjective"],) **API Reference:**[LengthBasedExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.length_based.LengthBasedExampleSelector.html) | [FewShotPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.few_shot.FewShotPromptTemplate.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) # An example with small input, so it selects all examples.print(dynamic_prompt.format(adjective="big")) Give the antonym of every inputInput: happyOutput: sadInput: tallOutput: shortInput: energeticOutput: lethargicInput: sunnyOutput: gloomyInput: windyOutput: calmInput: bigOutput: # An example with long input, so it selects only one example.long_string = "big and huge and massive and large and gigantic and tall and much much much much much bigger than everything else"print(dynamic_prompt.format(adjective=long_string)) Give the antonym of every inputInput: happyOutput: sadInput: big and huge and massive and large and gigantic and tall and much much much much much bigger than everything elseOutput: # You can add an example to an example selector as well.new_example = {"input": "big", "output": "small"}dynamic_prompt.example_selector.add_example(new_example)print(dynamic_prompt.format(adjective="enthusiastic")) Give the antonym of every inputInput: happyOutput: sadInput: tallOutput: shortInput: energeticOutput: lethargicInput: sunnyOutput: gloomyInput: windyOutput: calmInput: bigOutput: smallInput: enthusiasticOutput: [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/example_selectors_length_based.ipynb) * * * 
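The commented-out `get_text_length` parameter above can be overridden to change how length is measured. The sketch below counts characters instead of whitespace-separated words; both the function and the `max_length` value are illustrative, and it assumes the `examples` and `example_prompt` defined above:

```python
from langchain_core.example_selectors import LengthBasedExampleSelector

# Assumes `examples` and `example_prompt` from the cell above.
char_based_selector = LengthBasedExampleSelector(
    examples=examples,
    example_prompt=example_prompt,
    max_length=120,                          # budget is now measured in characters
    get_text_length=lambda text: len(text),  # count characters instead of words
)
print(char_based_selector.select_examples({"adjective": "big"}))
```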
https://python.langchain.com/v0.2/docs/how_to/example_selectors_similarity/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to select examples by similarity How to select examples by similarity ==================================== This object selects examples based on similarity to the inputs. It does this by finding the examples with the embeddings that have the greatest cosine similarity with the inputs. from langchain_chroma import Chromafrom langchain_core.example_selectors import SemanticSimilarityExampleSelectorfrom langchain_core.prompts import FewShotPromptTemplate, PromptTemplatefrom langchain_openai import OpenAIEmbeddingsexample_prompt = PromptTemplate( input_variables=["input", "output"], template="Input: {input}\nOutput: {output}",)# Examples of a pretend task of creating antonyms.examples = [ {"input": "happy", "output": "sad"}, {"input": "tall", "output": "short"}, {"input": "energetic", "output": "lethargic"}, {"input": "sunny", "output": "gloomy"}, {"input": "windy", "output": "calm"},] **API Reference:**[SemanticSimilarityExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.SemanticSimilarityExampleSelector.html) | [FewShotPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.few_shot.FewShotPromptTemplate.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) example_selector = SemanticSimilarityExampleSelector.from_examples( # The list of examples available to select from. examples, # The embedding class used to produce embeddings which are used to measure semantic similarity. OpenAIEmbeddings(), # The VectorStore class that is used to store the embeddings and do a similarity search over. Chroma, # The number of examples to produce. k=1,)similar_prompt = FewShotPromptTemplate( # We provide an ExampleSelector instead of examples. example_selector=example_selector, example_prompt=example_prompt, prefix="Give the antonym of every input", suffix="Input: {adjective}\nOutput:", input_variables=["adjective"],) # Input is a feeling, so should select the happy/sad exampleprint(similar_prompt.format(adjective="worried")) Give the antonym of every inputInput: happyOutput: sadInput: worriedOutput: # Input is a measurement, so should select the tall/short exampleprint(similar_prompt.format(adjective="large")) Give the antonym of every inputInput: tallOutput: shortInput: largeOutput: # You can add new examples to the SemanticSimilarityExampleSelector as wellsimilar_prompt.example_selector.add_example( {"input": "enthusiastic", "output": "apathetic"})print(similar_prompt.format(adjective="passionate")) Give the antonym of every inputInput: enthusiasticOutput: apatheticInput: passionateOutput: [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/example_selectors_similarity.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to select examples by n-gram overlap ](/v0.2/docs/how_to/example_selectors_ngram/)[ Next How to use reference examples when doing extraction ](/v0.2/docs/how_to/extraction_examples/)
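The vector store class is pluggable: the same selector can be built on, for example, FAISS instead of Chroma. The sketch below assumes the `examples` list defined above, a set `OPENAI_API_KEY`, and an installed `faiss-cpu` package:

```python
from langchain_community.vectorstores import FAISS
from langchain_core.example_selectors import SemanticSimilarityExampleSelector
from langchain_openai import OpenAIEmbeddings

faiss_selector = SemanticSimilarityExampleSelector.from_examples(
    examples,            # the antonym examples defined above
    OpenAIEmbeddings(),  # embeddings used to measure semantic similarity
    FAISS,               # any VectorStore class can be used here
    k=1,
)
print(faiss_selector.select_examples({"adjective": "large"}))
```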
https://python.langchain.com/v0.2/docs/how_to/example_selectors_ngram/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to select examples by n-gram overlap How to select examples by n-gram overlap ======================================== The `NGramOverlapExampleSelector` selects and orders examples based on which examples are most similar to the input, according to an ngram overlap score. The ngram overlap score is a float between 0.0 and 1.0, inclusive. The selector allows for a threshold score to be set. Examples with an ngram overlap score less than or equal to the threshold are excluded. The threshold is set to -1.0, by default, so will not exclude any examples, only reorder them. Setting the threshold to 0.0 will exclude examples that have no ngram overlaps with the input. from langchain_community.example_selectors import NGramOverlapExampleSelectorfrom langchain_core.prompts import FewShotPromptTemplate, PromptTemplateexample_prompt = PromptTemplate( input_variables=["input", "output"], template="Input: {input}\nOutput: {output}",)# Examples of a fictional translation task.examples = [ {"input": "See Spot run.", "output": "Ver correr a Spot."}, {"input": "My dog barks.", "output": "Mi perro ladra."}, {"input": "Spot can run.", "output": "Spot puede correr."},] **API Reference:**[NGramOverlapExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_community.example_selectors.ngram_overlap.NGramOverlapExampleSelector.html) | [FewShotPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.few_shot.FewShotPromptTemplate.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) example_selector = NGramOverlapExampleSelector( # The examples it has available to choose from. examples=examples, # The PromptTemplate being used to format the examples. example_prompt=example_prompt, # The threshold, at which selector stops. # It is set to -1.0 by default. threshold=-1.0, # For negative threshold: # Selector sorts examples by ngram overlap score, and excludes none. # For threshold greater than 1.0: # Selector excludes all examples, and returns an empty list. # For threshold equal to 0.0: # Selector sorts examples by ngram overlap score, # and excludes those with no ngram overlap with input.)dynamic_prompt = FewShotPromptTemplate( # We provide an ExampleSelector instead of examples. 
example_selector=example_selector, example_prompt=example_prompt, prefix="Give the Spanish translation of every input", suffix="Input: {sentence}\nOutput:", input_variables=["sentence"],) # An example input with large ngram overlap with "Spot can run."# and no overlap with "My dog barks."print(dynamic_prompt.format(sentence="Spot can run fast.")) Give the Spanish translation of every inputInput: Spot can run.Output: Spot puede correr.Input: See Spot run.Output: Ver correr a Spot.Input: My dog barks.Output: Mi perro ladra.Input: Spot can run fast.Output: # You can add examples to NGramOverlapExampleSelector as well.new_example = {"input": "Spot plays fetch.", "output": "Spot juega a buscar."}example_selector.add_example(new_example)print(dynamic_prompt.format(sentence="Spot can run fast.")) Give the Spanish translation of every inputInput: Spot can run.Output: Spot puede correr.Input: See Spot run.Output: Ver correr a Spot.Input: Spot plays fetch.Output: Spot juega a buscar.Input: My dog barks.Output: Mi perro ladra.Input: Spot can run fast.Output: # You can set a threshold at which examples are excluded.# For example, setting threshold equal to 0.0# excludes examples with no ngram overlaps with input.# Since "My dog barks." has no ngram overlaps with "Spot can run fast."# it is excluded.example_selector.threshold = 0.0print(dynamic_prompt.format(sentence="Spot can run fast.")) Give the Spanish translation of every inputInput: Spot can run.Output: Spot puede correr.Input: See Spot run.Output: Ver correr a Spot.Input: Spot plays fetch.Output: Spot juega a buscar.Input: Spot can run fast.Output: # Setting small nonzero thresholdexample_selector.threshold = 0.09print(dynamic_prompt.format(sentence="Spot can play fetch.")) Give the Spanish translation of every inputInput: Spot can run.Output: Spot puede correr.Input: Spot plays fetch.Output: Spot juega a buscar.Input: Spot can play fetch.Output: # Setting threshold greater than 1.0example_selector.threshold = 1.0 + 1e-9print(dynamic_prompt.format(sentence="Spot can play fetch.")) Give the Spanish translation of every inputInput: Spot can play fetch.Output: [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/example_selectors_ngram.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to select examples by maximal marginal relevance (MMR) ](/v0.2/docs/how_to/example_selectors_mmr/)[ Next How to select examples by similarity ](/v0.2/docs/how_to/example_selectors_similarity/)
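You can also query the selector directly instead of going through the prompt template, which makes the effect of the threshold easier to see. This sketch assumes the `example_selector` defined above:

```python
# Assumes the NGramOverlapExampleSelector built above.
example_selector.threshold = 0.0  # drop examples with no n-gram overlap with the input
for example in example_selector.select_examples({"sentence": "Spot can run fast."}):
    print(example["input"], "->", example["output"])
```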
https://python.langchain.com/v0.2/docs/how_to/extraction_examples/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to use reference examples when doing extraction On this page How to use reference examples when doing extraction =================================================== The quality of extractions can often be improved by providing reference examples to the LLM. Data extraction attempts to generate structured representations of information found in text and other unstructured or semi-structured formats. [Tool-calling](/v0.2/docs/concepts/#functiontool-calling) LLM features are often used in this context. This guide demonstrates how to build few-shot examples of tool calls to help steer the behavior of extraction and similar applications. tip While this guide focuses how to use examples with a tool calling model, this technique is generally applicable, and will work also with JSON more or prompt based techniques. LangChain implements a [tool-call attribute](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.tool_calls) on messages from LLMs that include tool calls. See our [how-to guide on tool calling](/v0.2/docs/how_to/tool_calling/) for more detail. To build reference examples for data extraction, we build a chat history containing a sequence of: * [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) containing example inputs; * [AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) containing example tool calls; * [ToolMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessage.html) containing example tool outputs. LangChain adopts this convention for structuring tool calls into conversation across LLM model providers. First we build a prompt template that includes a placeholder for these messages: from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder# Define a custom prompt to provide instructions and any additional context.# 1) You can add examples into the prompt template to improve extraction quality# 2) Introduce additional parameters to take context into account (e.g., include metadata# about the document from which the text was extracted.)prompt = ChatPromptTemplate.from_messages( [ ( "system", "You are an expert extraction algorithm. " "Only extract relevant information from the text. " "If you do not know the value of an attribute asked " "to extract, return null for the attribute's value.", ), # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓ MessagesPlaceholder("examples"), # <-- EXAMPLES! # ↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑ ("human", "{text}"), ]) **API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [MessagesPlaceholder](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.MessagesPlaceholder.html) Test out the template: from langchain_core.messages import ( HumanMessage,)prompt.invoke( {"text": "this is some text", "examples": [HumanMessage(content="testing 1 2 3")]}) **API Reference:**[HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) ChatPromptValue(messages=[SystemMessage(content="You are an expert extraction algorithm. Only extract relevant information from the text. 
If you do not know the value of an attribute asked to extract, return null for the attribute's value."), HumanMessage(content='testing 1 2 3'), HumanMessage(content='this is some text')]) Define the schema[​](#define-the-schema "Direct link to Define the schema") --------------------------------------------------------------------------- Let's re-use the person schema from the [extraction tutorial](/v0.2/docs/tutorials/extraction/). from typing import List, Optionalfrom langchain_core.pydantic_v1 import BaseModel, Fieldfrom langchain_openai import ChatOpenAIclass Person(BaseModel): """Information about a person.""" # ^ Doc-string for the entity Person. # This doc-string is sent to the LLM as the description of the schema Person, # and it can help to improve extraction results. # Note that: # 1. Each field is an `optional` -- this allows the model to decline to extract it! # 2. Each field has a `description` -- this description is used by the LLM. # Having a good description can help improve extraction results. name: Optional[str] = Field(..., description="The name of the person") hair_color: Optional[str] = Field( ..., description="The color of the person's hair if known" ) height_in_meters: Optional[str] = Field(..., description="Height in METERs")class Data(BaseModel): """Extracted data about people.""" # Creates a model so that we can extract multiple entities. people: List[Person] **API Reference:**[ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) Define reference examples[​](#define-reference-examples "Direct link to Define reference examples") --------------------------------------------------------------------------------------------------- Examples can be defined as a list of input-output pairs. Each example contains an example `input` text and an example `output` showing what should be extracted from the text. info This is a bit in the weeds, so feel free to skip. The format of the example needs to match the API used (e.g., tool calling or JSON mode etc.). Here, the formatted examples will match the format expected for the tool calling API since that's what we're using. import uuidfrom typing import Dict, List, TypedDictfrom langchain_core.messages import ( AIMessage, BaseMessage, HumanMessage, SystemMessage, ToolMessage,)from langchain_core.pydantic_v1 import BaseModel, Fieldclass Example(TypedDict): """A representation of an example consisting of text input and expected tool calls. For extraction, the tool calls are represented as instances of pydantic model. """ input: str # This is the example text tool_calls: List[BaseModel] # Instances of pydantic model that should be extracteddef tool_example_to_messages(example: Example) -> List[BaseMessage]: """Convert an example into a list of messages that can be fed into an LLM. This code is an adapter that converts our example to a list of messages that can be fed into a chat model. The list of messages per example corresponds to: 1) HumanMessage: contains the content from which content should be extracted. 2) AIMessage: contains the extracted information from the model 3) ToolMessage: contains confirmation to the model that the model requested a tool correctly. The ToolMessage is required because some of the chat models are hyper-optimized for agents rather than for an extraction use case. 
""" messages: List[BaseMessage] = [HumanMessage(content=example["input"])] tool_calls = [] for tool_call in example["tool_calls"]: tool_calls.append( { "id": str(uuid.uuid4()), "args": tool_call.dict(), # The name of the function right now corresponds # to the name of the pydantic model # This is implicit in the API right now, # and will be improved over time. "name": tool_call.__class__.__name__, }, ) messages.append(AIMessage(content="", tool_calls=tool_calls)) tool_outputs = example.get("tool_outputs") or [ "You have correctly called this tool." ] * len(tool_calls) for output, tool_call in zip(tool_outputs, tool_calls): messages.append(ToolMessage(content=output, tool_call_id=tool_call["id"])) return messages **API Reference:**[AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) | [BaseMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.base.BaseMessage.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [SystemMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html) | [ToolMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessage.html) Next let's define our examples and then convert them into message format. examples = [ ( "The ocean is vast and blue. It's more than 20,000 feet deep. There are many fish in it.", Person(name=None, height_in_meters=None, hair_color=None), ), ( "Fiona traveled far from France to Spain.", Person(name="Fiona", height_in_meters=None, hair_color=None), ),]messages = []for text, tool_call in examples: messages.extend( tool_example_to_messages({"input": text, "tool_calls": [tool_call]}) ) Let's test out the prompt example_prompt = prompt.invoke({"text": "this is some text", "examples": messages})for message in example_prompt.messages: print(f"{message.type}: {message}") system: content="You are an expert extraction algorithm. Only extract relevant information from the text. If you do not know the value of an attribute asked to extract, return null for the attribute's value."human: content="The ocean is vast and blue. It's more than 20,000 feet deep. There are many fish in it."ai: content='' tool_calls=[{'name': 'Person', 'args': {'name': None, 'hair_color': None, 'height_in_meters': None}, 'id': 'b843ba77-4c9c-48ef-92a4-54e534f24521'}]tool: content='You have correctly called this tool.' tool_call_id='b843ba77-4c9c-48ef-92a4-54e534f24521'human: content='Fiona traveled far from France to Spain.'ai: content='' tool_calls=[{'name': 'Person', 'args': {'name': 'Fiona', 'hair_color': None, 'height_in_meters': None}, 'id': '46f00d6b-50e5-4482-9406-b07bb10340f6'}]tool: content='You have correctly called this tool.' tool_call_id='46f00d6b-50e5-4482-9406-b07bb10340f6'human: content='this is some text' Create an extractor[​](#create-an-extractor "Direct link to Create an extractor") --------------------------------------------------------------------------------- Let's select an LLM. Because we are using tool-calling, we will need a model that supports a tool-calling feature. See [this table](/v0.2/docs/integrations/chat/) for available LLMs. 
* OpenAI * Anthropic * Azure * Google * Cohere * FireworksAI * Groq * MistralAI * TogetherAI pip install -qU langchain-openai import getpassimport osos.environ["OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI(model="gpt-4-0125-preview", temperature=0) pip install -qU langchain-anthropic import getpassimport osos.environ["ANTHROPIC_API_KEY"] = getpass.getpass()from langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model="claude-3-sonnet-20240229") pip install -qU langchain-openai import getpassimport osos.environ["AZURE_OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import AzureChatOpenAIllm = AzureChatOpenAI( azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"], openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],) pip install -qU langchain-google-vertexai import getpassimport osos.environ["GOOGLE_API_KEY"] = getpass.getpass()from langchain_google_vertexai import ChatVertexAIllm = ChatVertexAI(model="gemini-pro") pip install -qU langchain-cohere import getpassimport osos.environ["COHERE_API_KEY"] = getpass.getpass()from langchain_cohere import ChatCoherellm = ChatCohere(model="command-r") pip install -qU langchain-fireworks import getpassimport osos.environ["FIREWORKS_API_KEY"] = getpass.getpass()from langchain_fireworks import ChatFireworksllm = ChatFireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct") pip install -qU langchain-groq import getpassimport osos.environ["GROQ_API_KEY"] = getpass.getpass()from langchain_groq import ChatGroqllm = ChatGroq(model="llama3-8b-8192") pip install -qU langchain-mistralai import getpassimport osos.environ["MISTRAL_API_KEY"] = getpass.getpass()from langchain_mistralai import ChatMistralAIllm = ChatMistralAI(model="mistral-large-latest") pip install -qU langchain-openai import getpassimport osos.environ["TOGETHER_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI( base_url="https://api.together.xyz/v1", api_key=os.environ["TOGETHER_API_KEY"], model="mistralai/Mixtral-8x7B-Instruct-v0.1",) Following the [extraction tutorial](/v0.2/docs/tutorials/extraction/), we use the `.with_structured_output` method to structure model outputs according to the desired schema: runnable = prompt | llm.with_structured_output( schema=Data, method="function_calling", include_raw=False,) Without examples 😿[​](#without-examples- "Direct link to Without examples 😿") ------------------------------------------------------------------------------- Notice that even capable models can fail with a **very simple** test case! for _ in range(5): text = "The solar system is large, but earth has only 1 moon." print(runnable.invoke({"text": text, "examples": []})) people=[Person(name='earth', hair_color='null', height_in_meters='null')]people=[Person(name='earth', hair_color='null', height_in_meters='null')]people=[]people=[Person(name='earth', hair_color='null', height_in_meters='null')]people=[] With examples 😻[​](#with-examples- "Direct link to With examples 😻") ---------------------------------------------------------------------- Reference examples helps to fix the failure! for _ in range(5): text = "The solar system is large, but earth has only 1 moon." 
print(runnable.invoke({"text": text, "examples": messages})) people=[]people=[]people=[]people=[]people=[] Note that we can see the few-shot examples as tool-calls in the [Langsmith trace](https://smith.langchain.com/public/4c436bc2-a1ce-440b-82f5-093947542e40/r). And we retain performance on a positive sample: runnable.invoke( { "text": "My name is Harrison. My hair is black.", "examples": messages, }) Data(people=[Person(name='Harrison', hair_color='black', height_in_meters=None)]) [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/extraction_examples.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to select examples by similarity ](/v0.2/docs/how_to/example_selectors_similarity/)[ Next How to handle long text when doing extraction ](/v0.2/docs/how_to/extraction_long_text/) * [Define the schema](#define-the-schema) * [Define reference examples](#define-reference-examples) * [Create an extractor](#create-an-extractor) * [Without examples 😿](#without-examples-) * [With examples 😻](#with-examples-)
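To recap, the moving pieces fit together as below; this assumes the `prompt`, the `Data` schema, the converted example `messages`, and the `llm` defined earlier on this page:

```python
# Assumes `prompt`, `Data`, `messages`, and `llm` from the cells above.
runnable = prompt | llm.with_structured_output(
    schema=Data,
    method="function_calling",
    include_raw=False,
)

text = "The solar system is large, but earth has only 1 moon."
print(runnable.invoke({"text": text, "examples": []}))        # without examples: may invent a Person
print(runnable.invoke({"text": text, "examples": messages}))  # with examples: typically returns no people
```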
https://python.langchain.com/v0.2/docs/integrations/platforms/
* [](/v0.2/) * Providers On this page Providers ========= info If you'd like to write your own integration, see [Extending LangChain](/v0.2/docs/how_to/#custom). If you'd like to contribute an integration, see [Contributing integrations](/v0.2/docs/contributing/integrations/). LangChain integrates with many providers. Partner Packages[​](#partner-packages "Direct link to Partner Packages") ------------------------------------------------------------------------ These providers have standalone `langchain-{provider}` packages for improved versioning, dependency management and testing. * [AI21](/v0.2/docs/integrations/providers/ai21/) * [Airbyte](/v0.2/docs/integrations/providers/airbyte/) * [Amazon Web Services](/v0.2/docs/integrations/platforms/aws/) * [Anthropic](/v0.2/docs/integrations/platforms/anthropic/) * [Astra DB](/v0.2/docs/integrations/providers/astradb/) * [Cohere](/v0.2/docs/integrations/providers/cohere/) * [Couchbase](/v0.2/docs/integrations/providers/couchbase/) * [Elasticsearch](/v0.2/docs/integrations/providers/elasticsearch/) * [Exa Search](/v0.2/docs/integrations/providers/exa_search/) * [Fireworks](/v0.2/docs/integrations/providers/fireworks/) * [Google](/v0.2/docs/integrations/platforms/google/) * [Groq](/v0.2/docs/integrations/providers/groq/) * [IBM](/v0.2/docs/integrations/providers/ibm/) * [MistralAI](/v0.2/docs/integrations/providers/mistralai/) * [MongoDB](/v0.2/docs/integrations/providers/mongodb_atlas/) * [Nomic](/v0.2/docs/integrations/providers/nomic/) * [Nvidia](/v0.2/docs/integrations/providers/nvidia/) * [OpenAI](/v0.2/docs/integrations/platforms/openai/) * [Pinecone](/v0.2/docs/integrations/providers/pinecone/) * [Qdrant](/v0.2/docs/integrations/providers/qdrant/) * [Robocorp](/v0.2/docs/integrations/providers/robocorp/) * [Together AI](/v0.2/docs/integrations/providers/together/) * [Upstage](/v0.2/docs/integrations/providers/upstage/) * [Voyage AI](/v0.2/docs/integrations/providers/voyageai/) Featured Community Providers[​](#featured-community-providers "Direct link to Featured Community Providers") ------------------------------------------------------------------------------------------------------------ * [Hugging Face](/v0.2/docs/integrations/platforms/huggingface/) * [Microsoft](/v0.2/docs/integrations/platforms/microsoft/) All Providers[​](#all-providers "Direct link to All Providers") --------------------------------------------------------------- Click [here](/v0.2/docs/integrations/providers/) to see all providers. [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/integrations/platforms/index.mdx) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Next Providers ](/v0.2/docs/integrations/platforms/) * [Partner Packages](#partner-packages) * [Featured Community Providers](#featured-community-providers) * [All Providers](#all-providers)
https://python.langchain.com/v0.2/docs/how_to/extraction_parse/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to use prompting alone (no tool calling) to do extraction On this page How to use prompting alone (no tool calling) to do extraction ============================================================= Tool calling features are not required for generating structured output from LLMs. LLMs that are able to follow prompt instructions well can be tasked with outputting information in a given format. This approach relies on designing good prompts and then parsing the output of the LLMs to make them extract information well. To extract data without tool-calling features: 1. Instruct the LLM to generate text following an expected format (e.g., JSON with a certain schema); 2. Use [output parsers](/v0.2/docs/concepts/#output-parsers) to structure the model response into a desired Python object. First we select a LLM: * OpenAI * Anthropic * Azure * Google * Cohere * FireworksAI * Groq * MistralAI * TogetherAI pip install -qU langchain-openai import getpassimport osos.environ["OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAImodel = ChatOpenAI(model="gpt-3.5-turbo-0125") pip install -qU langchain-anthropic import getpassimport osos.environ["ANTHROPIC_API_KEY"] = getpass.getpass()from langchain_anthropic import ChatAnthropicmodel = ChatAnthropic(model="claude-3-sonnet-20240229") pip install -qU langchain-openai import getpassimport osos.environ["AZURE_OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import AzureChatOpenAImodel = AzureChatOpenAI( azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"], openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],) pip install -qU langchain-google-vertexai import getpassimport osos.environ["GOOGLE_API_KEY"] = getpass.getpass()from langchain_google_vertexai import ChatVertexAImodel = ChatVertexAI(model="gemini-pro") pip install -qU langchain-cohere import getpassimport osos.environ["COHERE_API_KEY"] = getpass.getpass()from langchain_cohere import ChatCoheremodel = ChatCohere(model="command-r") pip install -qU langchain-fireworks import getpassimport osos.environ["FIREWORKS_API_KEY"] = getpass.getpass()from langchain_fireworks import ChatFireworksmodel = ChatFireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct") pip install -qU langchain-groq import getpassimport osos.environ["GROQ_API_KEY"] = getpass.getpass()from langchain_groq import ChatGroqmodel = ChatGroq(model="llama3-8b-8192") pip install -qU langchain-mistralai import getpassimport osos.environ["MISTRAL_API_KEY"] = getpass.getpass()from langchain_mistralai import ChatMistralAImodel = ChatMistralAI(model="mistral-large-latest") pip install -qU langchain-openai import getpassimport osos.environ["TOGETHER_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAImodel = ChatOpenAI( base_url="https://api.together.xyz/v1", api_key=os.environ["TOGETHER_API_KEY"], model="mistralai/Mixtral-8x7B-Instruct-v0.1",) tip This tutorial is meant to be simple, but generally should really include reference examples to squeeze out performance! Using PydanticOutputParser[​](#using-pydanticoutputparser "Direct link to Using PydanticOutputParser") ------------------------------------------------------------------------------------------------------ The following example uses the built-in `PydanticOutputParser` to parse the output of a chat model. 
from typing import List, Optionalfrom langchain_core.output_parsers import PydanticOutputParserfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.pydantic_v1 import BaseModel, Field, validatorclass Person(BaseModel): """Information about a person.""" name: str = Field(..., description="The name of the person") height_in_meters: float = Field( ..., description="The height of the person expressed in meters." )class People(BaseModel): """Identifying information about all people in a text.""" people: List[Person]# Set up a parserparser = PydanticOutputParser(pydantic_object=People)# Promptprompt = ChatPromptTemplate.from_messages( [ ( "system", "Answer the user query. Wrap the output in `json` tags\n{format_instructions}", ), ("human", "{query}"), ]).partial(format_instructions=parser.get_format_instructions()) **API Reference:**[PydanticOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.pydantic.PydanticOutputParser.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) Let's take a look at what information is sent to the model query = "Anna is 23 years old and she is 6 feet tall" print(prompt.format_prompt(query=query).to_string()) System: Answer the user query. Wrap the output in `json` tagsThe output should be formatted as a JSON instance that conforms to the JSON schema below.As an example, for the schema {"properties": {"foo": {"title": "Foo", "description": "a list of strings", "type": "array", "items": {"type": "string"}}}, "required": ["foo"]}the object {"foo": ["bar", "baz"]} is a well-formatted instance of the schema. The object {"properties": {"foo": ["bar", "baz"]}} is not well-formatted.Here is the output schema: {"description": "Identifying information about all people in a text.", "properties": {"people": {"title": "People", "type": "array", "items": {"$ref": "#/definitions/Person"}}}, "required": \["people"\], "definitions": {"Person": {"title": "Person", "description": "Information about a person.", "type": "object", "properties": {"name": {"title": "Name", "description": "The name of the person", "type": "string"}, "height\_in\_meters": {"title": "Height In Meters", "description": "The height of the person expressed in meters.", "type": "number"}}, "required": \["name", "height\_in\_meters"\]}}} Human: Anna is 23 years old and she is 6 feet tall Having defined our prompt, we simply chain together the prompt, model and output parser: chain = prompt | model | parserchain.invoke({"query": query}) People(people=[Person(name='Anna', height_in_meters=1.83)]) Check out the associated [Langsmith trace](https://smith.langchain.com/public/92ed52a3-92b9-45af-a663-0a9c00e5e396/r). Note that the schema shows up in two places: 1. In the prompt, via `parser.get_format_instructions()`; 2. In the chain, to receive the formatted output and structure it into a Python object (in this case, the Pydantic object `People`). Custom Parsing[​](#custom-parsing "Direct link to Custom Parsing") ------------------------------------------------------------------ If desired, it's easy to create a custom prompt and parser with `LangChain` and `LCEL`. To create a custom parser, define a function to parse the output from the model (typically an [AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html)) into an object of your choice. See below for a simple implementation of a JSON parser. 
import jsonimport refrom typing import List, Optionalfrom langchain_anthropic.chat_models import ChatAnthropicfrom langchain_core.messages import AIMessagefrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.pydantic_v1 import BaseModel, Field, validatorclass Person(BaseModel): """Information about a person.""" name: str = Field(..., description="The name of the person") height_in_meters: float = Field( ..., description="The height of the person expressed in meters." )class People(BaseModel): """Identifying information about all people in a text.""" people: List[Person]# Promptprompt = ChatPromptTemplate.from_messages( [ ( "system", "Answer the user query. Output your answer as JSON that " "matches the given schema: ```json\n{schema}\n```. " "Make sure to wrap the answer in ```json and ``` tags", ), ("human", "{query}"), ]).partial(schema=People.schema())# Custom parserdef extract_json(message: AIMessage) -> List[dict]: """Extracts JSON content from a string where JSON is embedded between ```json and ``` tags. Parameters: text (str): The text containing the JSON content. Returns: list: A list of extracted JSON strings. """ text = message.content # Define the regular expression pattern to match JSON blocks pattern = r"```json(.*?)```" # Find all non-overlapping matches of the pattern in the string matches = re.findall(pattern, text, re.DOTALL) # Return the list of matched JSON strings, stripping any leading or trailing whitespace try: return [json.loads(match.strip()) for match in matches] except Exception: raise ValueError(f"Failed to parse: {message}") **API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) | [AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) query = "Anna is 23 years old and she is 6 feet tall"print(prompt.format_prompt(query=query).to_string()) System: Answer the user query. Output your answer as JSON that matches the given schema: ```json{'title': 'People', 'description': 'Identifying information about all people in a text.', 'type': 'object', 'properties': {'people': {'title': 'People', 'type': 'array', 'items': {'$ref': '#/definitions/Person'}}}, 'required': ['people'], 'definitions': {'Person': {'title': 'Person', 'description': 'Information about a person.', 'type': 'object', 'properties': {'name': {'title': 'Name', 'description': 'The name of the person', 'type': 'string'}, 'height_in_meters': {'title': 'Height In Meters', 'description': 'The height of the person expressed in meters.', 'type': 'number'}}, 'required': ['name', 'height_in_meters']}}}```. Make sure to wrap the answer in ```json and ``` tagsHuman: Anna is 23 years old and she is 6 feet tall chain = prompt | model | extract_jsonchain.invoke({"query": query}) [{'people': [{'name': 'Anna', 'height_in_meters': 1.83}]}] Other Libraries[​](#other-libraries "Direct link to Other Libraries") --------------------------------------------------------------------- If you're looking at extracting using a parsing approach, check out the [Kor](https://eyurtsev.github.io/kor/) library. It's written by one of the `LangChain` maintainers and it helps to craft a prompt that takes examples into account, allows controlling formats (e.g., JSON or CSV) and expresses the schema in TypeScript. It seems to work pretty! 
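If you want the custom-parsing route to hand back validated objects (as the `PydanticOutputParser` example does) rather than raw dicts, you can append a small validation step. A minimal sketch, assuming the `prompt`, `model`, `extract_json`, and `People` definitions from the example above; the `to_people` helper is hypothetical:

```python
# Minimal sketch (not from the original guide): validate the dicts returned by
# `extract_json` against the `People` schema defined above.
from typing import List

from langchain_core.runnables import RunnableLambda


def to_people(results: List[dict]) -> List[People]:
    # People(**result) also validates the nested Person entries.
    return [People(**result) for result in results]


validated_chain = prompt | model | extract_json | RunnableLambda(to_people)
# validated_chain.invoke({"query": query})
# e.g. [People(people=[Person(name='Anna', height_in_meters=1.83)])]
```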
https://python.langchain.com/v0.2/docs/how_to/hybrid/
Hybrid Search ============= The standard search in LangChain is done by vector similarity. However, a number of vectorstore implementations (Astra DB, ElasticSearch, Neo4J, AzureSearch, ...) also support more advanced search combining vector similarity search and other search techniques (full-text, BM25, and so on). This is generally referred to as "Hybrid" search. **Step 1: Make sure the vectorstore you are using supports hybrid search** At the moment, there is no unified way to perform hybrid search in LangChain. Each vectorstore may have its own way of doing it. This is generally exposed as a keyword argument that is passed in during `similarity_search`. By reading the documentation or source code, figure out whether the vectorstore you are using supports hybrid search, and, if so, how to use it. **Step 2: Add that parameter as a configurable field for the chain** This will let you easily call the chain and configure any relevant flags at runtime. See [this documentation](/v0.2/docs/how_to/configure/) for more information on configuration. **Step 3: Call the chain with that configurable field** Now, at runtime, you can call this chain with the configurable field. Code Example ------------ Let's see a concrete example of what this looks like in code. We will use the Cassandra/CQL interface of Astra DB for this example. Install the following Python package: !pip install "cassio>=0.1.7" Get the [connection secrets](https://docs.datastax.com/en/astra/astra-db-vector/get-started/quickstart.html). Initialize cassio: import cassiocassio.init( database_id="Your database ID", token="Your application token", keyspace="Your key space",) Create the Cassandra VectorStore with a standard [index analyzer](https://docs.datastax.com/en/astra/astra-db-vector/cql/use-analyzers-with-cql.html). The index analyzer is needed to enable term matching. from cassio.table.cql import STANDARD_ANALYZERfrom langchain_community.vectorstores import Cassandrafrom langchain_openai import OpenAIEmbeddingsembeddings = OpenAIEmbeddings()vectorstore = Cassandra( embedding=embeddings, table_name="test_hybrid", body_index_options=[STANDARD_ANALYZER], session=None, keyspace=None,)vectorstore.add_texts( [ "In 2023, I visited Paris", "In 2022, I visited New York", "In 2021, I visited New Orleans", ]) **API Reference:**[Cassandra](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.cassandra.Cassandra.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) If we do a standard similarity search, we get all the documents: vectorstore.as_retriever().invoke("What city did I visit last?") [Document(page_content='In 2022, I visited New York'),Document(page_content='In 2023, I visited Paris'),Document(page_content='In 2021, I visited New Orleans')] The Astra DB vectorstore `body_search` argument can be used to filter the search on the term `new`. 
vectorstore.as_retriever(search_kwargs={"body_search": "new"}).invoke( "What city did I visit last?") [Document(page_content='In 2022, I visited New York'),Document(page_content='In 2021, I visited New Orleans')] We can now create the chain that we will use to do question-answering over from langchain_core.output_parsers import StrOutputParserfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.runnables import ( ConfigurableField, RunnablePassthrough,)from langchain_openai import ChatOpenAI **API Reference:**[StrOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.string.StrOutputParser.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [ConfigurableField](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.utils.ConfigurableField.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) This is basic question-answering chain set up. template = """Answer the question based only on the following context:{context}Question: {question}"""prompt = ChatPromptTemplate.from_template(template)model = ChatOpenAI()retriever = vectorstore.as_retriever() Here we mark the retriever as having a configurable field. All vectorstore retrievers have `search_kwargs` as a field. This is just a dictionary, with vectorstore specific fields configurable_retriever = retriever.configurable_fields( search_kwargs=ConfigurableField( id="search_kwargs", name="Search Kwargs", description="The search kwargs to use", )) We can now create the chain using our configurable retriever chain = ( {"context": configurable_retriever, "question": RunnablePassthrough()} | prompt | model | StrOutputParser()) chain.invoke("What city did I visit last?") Paris We can now invoke the chain with configurable options. `search_kwargs` is the id of the configurable field. The value is the search kwargs to use for Astra DB. chain.invoke( "What city did I visit last?", config={"configurable": {"search_kwargs": {"body_search": "new"}}},) New York [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/hybrid.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to filter messages ](/v0.2/docs/how_to/filter_messages/)[ Next How to use the LangChain indexing API ](/v0.2/docs/how_to/indexing/) * [Code Example](#code-example)
https://python.langchain.com/v0.2/docs/how_to/extraction_long_text/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to handle long text when doing extraction On this page How to handle long text when doing extraction ============================================= When working with files, like PDFs, you're likely to encounter text that exceeds your language model's context window. To process this text, consider these strategies: 1. **Change LLM** Choose a different LLM that supports a larger context window. 2. **Brute Force** Chunk the document, and extract content from each chunk. 3. **RAG** Chunk the document, index the chunks, and only extract content from a subset of chunks that look "relevant". Keep in mind that these strategies have different trade off and the best strategy likely depends on the application that you're designing! This guide demonstrates how to implement strategies 2 and 3. Set up[​](#set-up "Direct link to Set up") ------------------------------------------ We need some example data! Let's download an article about [cars from wikipedia](https://en.wikipedia.org/wiki/Car) and load it as a LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html). import reimport requestsfrom langchain_community.document_loaders import BSHTMLLoader# Download the contentresponse = requests.get("https://en.wikipedia.org/wiki/Car")# Write it to a filewith open("car.html", "w", encoding="utf-8") as f: f.write(response.text)# Load it with an HTML parserloader = BSHTMLLoader("car.html")document = loader.load()[0]# Clean up code# Replace consecutive new lines with a single new linedocument.page_content = re.sub("\n\n+", "\n", document.page_content) **API Reference:**[BSHTMLLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.html_bs.BSHTMLLoader.html) print(len(document.page_content)) 79174 Define the schema[​](#define-the-schema "Direct link to Define the schema") --------------------------------------------------------------------------- Following the [extraction tutorial](/v0.2/docs/tutorials/extraction/), we will use Pydantic to define the schema of information we wish to extract. In this case, we will extract a list of "key developments" (e.g., important historical events) that include a year and description. Note that we also include an `evidence` key and instruct the model to provide in verbatim the relevant sentences of text from the article. This allows us to compare the extraction results to (the model's reconstruction of) text from the original document. from typing import List, Optionalfrom langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholderfrom langchain_core.pydantic_v1 import BaseModel, Fieldclass KeyDevelopment(BaseModel): """Information about a development in the history of cars.""" year: int = Field( ..., description="The year when there was an important historic development." ) description: str = Field( ..., description="What happened in this year? What was the development?" 
) evidence: str = Field( ..., description="Repeat in verbatim the sentence(s) from which the year and description information were extracted", )class ExtractionData(BaseModel): """Extracted information about key developments in the history of cars.""" key_developments: List[KeyDevelopment]# Define a custom prompt to provide instructions and any additional context.# 1) You can add examples into the prompt template to improve extraction quality# 2) Introduce additional parameters to take context into account (e.g., include metadata# about the document from which the text was extracted.)prompt = ChatPromptTemplate.from_messages( [ ( "system", "You are an expert at identifying key historic development in text. " "Only extract important historic developments. Extract nothing if no important information can be found in the text.", ), ("human", "{text}"), ]) **API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [MessagesPlaceholder](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.MessagesPlaceholder.html) Create an extractor[​](#create-an-extractor "Direct link to Create an extractor") --------------------------------------------------------------------------------- Let's select an LLM. Because we are using tool-calling, we will need a model that supports a tool-calling feature. See [this table](/v0.2/docs/integrations/chat/) for available LLMs. * OpenAI * Anthropic * Azure * Google * Cohere * FireworksAI * Groq * MistralAI * TogetherAI pip install -qU langchain-openai import getpassimport osos.environ["OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI(model="gpt-4-0125-preview", temperature=0) pip install -qU langchain-anthropic import getpassimport osos.environ["ANTHROPIC_API_KEY"] = getpass.getpass()from langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model="claude-3-sonnet-20240229") pip install -qU langchain-openai import getpassimport osos.environ["AZURE_OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import AzureChatOpenAIllm = AzureChatOpenAI( azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"], openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],) pip install -qU langchain-google-vertexai import getpassimport osos.environ["GOOGLE_API_KEY"] = getpass.getpass()from langchain_google_vertexai import ChatVertexAIllm = ChatVertexAI(model="gemini-pro") pip install -qU langchain-cohere import getpassimport osos.environ["COHERE_API_KEY"] = getpass.getpass()from langchain_cohere import ChatCoherellm = ChatCohere(model="command-r") pip install -qU langchain-fireworks import getpassimport osos.environ["FIREWORKS_API_KEY"] = getpass.getpass()from langchain_fireworks import ChatFireworksllm = ChatFireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct") pip install -qU langchain-groq import getpassimport osos.environ["GROQ_API_KEY"] = getpass.getpass()from langchain_groq import ChatGroqllm = ChatGroq(model="llama3-8b-8192") pip install -qU langchain-mistralai import getpassimport osos.environ["MISTRAL_API_KEY"] = getpass.getpass()from langchain_mistralai import ChatMistralAIllm = ChatMistralAI(model="mistral-large-latest") pip install -qU langchain-openai import getpassimport osos.environ["TOGETHER_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI( base_url="https://api.together.xyz/v1", 
api_key=os.environ["TOGETHER_API_KEY"], model="mistralai/Mixtral-8x7B-Instruct-v0.1",) extractor = prompt | llm.with_structured_output( schema=ExtractionData, include_raw=False,) Brute force approach[​](#brute-force-approach "Direct link to Brute force approach") ------------------------------------------------------------------------------------ Split the documents into chunks such that each chunk fits into the context window of the LLMs. from langchain_text_splitters import TokenTextSplittertext_splitter = TokenTextSplitter( # Controls the size of each chunk chunk_size=2000, # Controls overlap between chunks chunk_overlap=20,)texts = text_splitter.split_text(document.page_content) **API Reference:**[TokenTextSplitter](https://api.python.langchain.com/en/latest/base/langchain_text_splitters.base.TokenTextSplitter.html) Use [batch](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) functionality to run the extraction in **parallel** across each chunk! tip You can often use .batch() to parallelize the extractions! `.batch` uses a threadpool under the hood to help you parallelize workloads. If your model is exposed via an API, this will likely speed up your extraction flow! # Limit just to the first 3 chunks# so the code can be re-run quicklyfirst_few = texts[:3]extractions = extractor.batch( [{"text": text} for text in first_few], {"max_concurrency": 5}, # limit the concurrency by passing max concurrency!) ### Merge results[​](#merge-results "Direct link to Merge results") After extracting data from across the chunks, we'll want to merge the extractions together. key_developments = []for extraction in extractions: key_developments.extend(extraction.key_developments)key_developments[:10] [KeyDevelopment(year=1966, description='The Toyota Corolla began production, becoming the best-selling series of automobile in history.', evidence='The Toyota Corolla, which has been in production since 1966, is the best-selling series of automobile in history.'), KeyDevelopment(year=1769, description='Nicolas-Joseph Cugnot built the first steam-powered road vehicle.', evidence='The French inventor Nicolas-Joseph Cugnot built the first steam-powered road vehicle in 1769.'), KeyDevelopment(year=1808, description='François Isaac de Rivaz designed and constructed the first internal combustion-powered automobile.', evidence='the Swiss inventor François Isaac de Rivaz designed and constructed the first internal combustion-powered automobile in 1808.'), KeyDevelopment(year=1886, description='Carl Benz patented his Benz Patent-Motorwagen, inventing the modern car.', evidence='The modern car—a practical, marketable automobile for everyday use—was invented in 1886, when the German inventor Carl Benz patented his Benz Patent-Motorwagen.'), KeyDevelopment(year=1908, description='Ford Model T, one of the first cars affordable by the masses, began production.', evidence='One of the first cars affordable by the masses was the Ford Model T, begun in 1908, an American car manufactured by the Ford Motor Company.'), KeyDevelopment(year=1888, description="Bertha Benz undertook the first road trip by car to prove the road-worthiness of her husband's invention.", evidence="In August 1888, Bertha Benz, the wife of Carl Benz, undertook the first road trip by car, to prove the road-worthiness of her husband's invention."), KeyDevelopment(year=1896, description='Benz designed and patented the first internal-combustion flat engine, called boxermotor.', evidence='In 1896, Benz designed 
and patented the first internal-combustion flat engine, called boxermotor.'), KeyDevelopment(year=1897, description='Nesselsdorfer Wagenbau produced the Präsident automobil, one of the first factory-made cars in the world.', evidence='The first motor car in central Europe and one of the first factory-made cars in the world, was produced by Czech company Nesselsdorfer Wagenbau (later renamed to Tatra) in 1897, the Präsident automobil.'), KeyDevelopment(year=1890, description='Daimler Motoren Gesellschaft (DMG) was founded by Daimler and Maybach in Cannstatt.', evidence='Daimler and Maybach founded Daimler Motoren Gesellschaft (DMG) in Cannstatt in 1890.'), KeyDevelopment(year=1891, description='Auguste Doriot and Louis Rigoulot completed the longest trip by a petrol-driven vehicle with a Daimler powered Peugeot Type 3.', evidence='In 1891, Auguste Doriot and his Peugeot colleague Louis Rigoulot completed the longest trip by a petrol-driven vehicle when their self-designed and built Daimler powered Peugeot Type 3 completed 2,100 kilometres (1,300 mi) from Valentigney to Paris and Brest and back again.')] RAG based approach[​](#rag-based-approach "Direct link to RAG based approach") ------------------------------------------------------------------------------ Another simple idea is to chunk up the text, but instead of extracting information from every chunk, just focus on the the most relevant chunks. caution It can be difficult to identify which chunks are relevant. For example, in the `car` article we're using here, most of the article contains key development information. So by using **RAG**, we'll likely be throwing out a lot of relevant information. We suggest experimenting with your use case and determining whether this approach works or not. To implement the RAG based approach: 1. Chunk up your document(s) and index them (e.g., in a vectorstore); 2. Prepend the `extractor` chain with a retrieval step using the vectorstore. Here's a simple example that relies on the `FAISS` vectorstore. from langchain_community.vectorstores import FAISSfrom langchain_core.documents import Documentfrom langchain_core.runnables import RunnableLambdafrom langchain_openai import OpenAIEmbeddingsfrom langchain_text_splitters import CharacterTextSplittertexts = text_splitter.split_text(document.page_content)vectorstore = FAISS.from_texts(texts, embedding=OpenAIEmbeddings())retriever = vectorstore.as_retriever( search_kwargs={"k": 1}) # Only extract from first document **API Reference:**[FAISS](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html) | [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) | [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) | [CharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.CharacterTextSplitter.html) In this case the RAG extractor is only looking at the top document. 
rag_extractor = { "text": retriever | (lambda docs: docs[0].page_content) # fetch content of top doc} | extractor results = rag_extractor.invoke("Key developments associated with cars") for key_development in results.key_developments: print(key_development) year=1869 description='Mary Ward became one of the first documented car fatalities in Parsonstown, Ireland.' evidence='Mary Ward became one of the first documented car fatalities in 1869 in Parsonstown, Ireland,'year=1899 description="Henry Bliss one of the US's first pedestrian car casualties in New York City." evidence="Henry Bliss one of the US's first pedestrian car casualties in 1899 in New York City."year=2030 description='All fossil fuel vehicles will be banned in Amsterdam.' evidence='all fossil fuel vehicles will be banned in Amsterdam from 2030.' Common issues[​](#common-issues "Direct link to Common issues") --------------------------------------------------------------- Different methods have their own pros and cons related to cost, speed, and accuracy. Watch out for these issues: * Chunking content means that the LLM can fail to extract information if the information is spread across multiple chunks. * Large chunk overlap may cause the same information to be extracted twice, so be prepared to de-duplicate! * LLMs can make up data. If looking for a single fact across a large text and using a brute force approach, you may end up getting more made up data.
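On the de-duplication point above: a minimal sketch, assuming the `key_developments` list built in the merge step of the brute-force section; keying on `(year, description)` is just one reasonable choice.

```python
# Minimal sketch (not from the original guide): drop repeated extractions that
# can appear when chunks overlap, keyed on (year, description).
def deduplicate(developments):
    seen = set()
    unique = []
    for dev in developments:
        key = (dev.year, dev.description)
        if key not in seen:
            seen.add(key)
            unique.append(dev)
    return unique


key_developments = deduplicate(key_developments)
```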
https://python.langchain.com/v0.2/docs/how_to/inspect/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to inspect runnables On this page How to inspect runnables ======================== Prerequisites This guide assumes familiarity with the following concepts: * [LangChain Expression Language (LCEL)](/v0.2/docs/concepts/#langchain-expression-language) * [Chaining runnables](/v0.2/docs/how_to/sequence/) Once you create a runnable with [LangChain Expression Language](/v0.2/docs/concepts/#langchain-expression-language), you may often want to inspect it to get a better sense for what is going on. This notebook covers some methods for doing so. This guide shows some ways you can programmatically introspect the internal steps of chains. If you are instead interested in debugging issues in your chain, see [this section](/v0.2/docs/how_to/debugging/) instead. First, let's create an example chain. We will create one that does retrieval: %pip install -qU langchain langchain-openai faiss-cpu tiktoken from langchain_community.vectorstores import FAISSfrom langchain_core.output_parsers import StrOutputParserfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.runnables import RunnablePassthroughfrom langchain_openai import ChatOpenAI, OpenAIEmbeddingsvectorstore = FAISS.from_texts( ["harrison worked at kensho"], embedding=OpenAIEmbeddings())retriever = vectorstore.as_retriever()template = """Answer the question based only on the following context:{context}Question: {question}"""prompt = ChatPromptTemplate.from_template(template)model = ChatOpenAI()chain = ( {"context": retriever, "question": RunnablePassthrough()} | prompt | model | StrOutputParser()) **API Reference:**[FAISS](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html) | [StrOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.string.StrOutputParser.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) Get a graph[​](#get-a-graph "Direct link to Get a graph") --------------------------------------------------------- You can use the `get_graph()` method to get a graph representation of the runnable: chain.get_graph() Print a graph[​](#print-a-graph "Direct link to Print a graph") --------------------------------------------------------------- While that is not super legible, you can use the `print_ascii()` method to show that graph in a way that's easier to understand: chain.get_graph().print_ascii() +---------------------------------+ | Parallel<context,question>Input | +---------------------------------+ ** ** *** *** ** ** +----------------------+ +-------------+ | VectorStoreRetriever | | Passthrough | +----------------------+ +-------------+ ** ** *** *** ** ** +----------------------------------+ | Parallel<context,question>Output | +----------------------------------+ * * * +--------------------+ | ChatPromptTemplate | +--------------------+ * * * +------------+ | ChatOpenAI | +------------+ * * * +-----------------+ | StrOutputParser | +-----------------+ * * * +-----------------------+ | 
StrOutputParserOutput | +-----------------------+ Get the prompts[​](#get-the-prompts "Direct link to Get the prompts") --------------------------------------------------------------------- You may want to see just the prompts that are used in a chain with the `get_prompts()` method: chain.get_prompts() [ChatPromptTemplate(input_variables=['context', 'question'], messages=[HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['context', 'question'], template='Answer the question based only on the following context:\n{context}\n\nQuestion: {question}\n'))])] Next steps[​](#next-steps "Direct link to Next steps") ------------------------------------------------------ You've now learned how to introspect your composed LCEL chains. Next, check out the other how-to guides on runnables in this section, or the related how-to guide on [debugging your chains](/v0.2/docs/how_to/debugging/).
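As a small addendum not in the original guide: a composed chain is a `RunnableSequence`, so recent `langchain-core` versions also let you walk its parts programmatically via the `steps` attribute. A minimal sketch, assuming the `chain` built above:

```python
# Minimal sketch: list the components of the composed chain in order.
# Assumes `chain` from the example above and that your langchain-core version
# exposes RunnableSequence.steps.
for step in chain.steps:
    print(type(step).__name__)
# e.g. RunnableParallel, ChatPromptTemplate, ChatOpenAI, StrOutputParser
```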
https://python.langchain.com/v0.2/docs/how_to/fallbacks/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to add fallbacks to a runnable On this page How to add fallbacks to a runnable ================================== When working with language models, you may often encounter issues from the underlying APIs, whether these be rate limiting or downtime. Therefore, as you go to move your LLM applications into production it becomes more and more important to safeguard against these. That's why we've introduced the concept of fallbacks. A **fallback** is an alternative plan that may be used in an emergency. Crucially, fallbacks can be applied not only on the LLM level but on the whole runnable level. This is important because often times different models require different prompts. So if your call to OpenAI fails, you don't just want to send the same prompt to Anthropic - you probably want to use a different prompt template and send a different version there. Fallback for LLM API Errors[​](#fallback-for-llm-api-errors "Direct link to Fallback for LLM API Errors") --------------------------------------------------------------------------------------------------------- This is maybe the most common use case for fallbacks. A request to an LLM API can fail for a variety of reasons - the API could be down, you could have hit rate limits, any number of things. Therefore, using fallbacks can help protect against these types of things. IMPORTANT: By default, a lot of the LLM wrappers catch errors and retry. You will most likely want to turn those off when working with fallbacks. Otherwise the first wrapper will keep on retrying and not failing. %pip install --upgrade --quiet langchain langchain-openai from langchain_anthropic import ChatAnthropicfrom langchain_openai import ChatOpenAI **API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) First, let's mock out what happens if we hit a RateLimitError from OpenAI from unittest.mock import patchimport httpxfrom openai import RateLimitErrorrequest = httpx.Request("GET", "/")response = httpx.Response(200, request=request)error = RateLimitError("rate limit", response=response, body="") # Note that we set max_retries = 0 to avoid retrying on RateLimits, etcopenai_llm = ChatOpenAI(model="gpt-3.5-turbo-0125", max_retries=0)anthropic_llm = ChatAnthropic(model="claude-3-haiku-20240307")llm = openai_llm.with_fallbacks([anthropic_llm]) # Let's use just the OpenAI LLm first, to show that we run into an errorwith patch("openai.resources.chat.completions.Completions.create", side_effect=error): try: print(openai_llm.invoke("Why did the chicken cross the road?")) except RateLimitError: print("Hit error") Hit error # Now let's try with fallbacks to Anthropicwith patch("openai.resources.chat.completions.Completions.create", side_effect=error): try: print(llm.invoke("Why did the chicken cross the road?")) except RateLimitError: print("Hit error") content=' I don\'t actually know why the chicken crossed the road, but here are some possible humorous answers:\n\n- To get to the other side!\n\n- It was too chicken to just stand there. 
\n\n- It wanted a change of scenery.\n\n- It wanted to show the possum it could be done.\n\n- It was on its way to a poultry farmers\' convention.\n\nThe joke plays on the double meaning of "the other side" - literally crossing the road to the other side, or the "other side" meaning the afterlife. So it\'s an anti-joke, with a silly or unexpected pun as the answer.' additional_kwargs={} example=False We can use our "LLM with Fallbacks" as we would a normal LLM. from langchain_core.prompts import ChatPromptTemplateprompt = ChatPromptTemplate.from_messages( [ ( "system", "You're a nice assistant who always includes a compliment in your response", ), ("human", "Why did the {animal} cross the road"), ])chain = prompt | llmwith patch("openai.resources.chat.completions.Completions.create", side_effect=error): try: print(chain.invoke({"animal": "kangaroo"})) except RateLimitError: print("Hit error") **API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) content=" I don't actually know why the kangaroo crossed the road, but I can take a guess! Here are some possible reasons:\n\n- To get to the other side (the classic joke answer!)\n\n- It was trying to find some food or water \n\n- It was trying to find a mate during mating season\n\n- It was fleeing from a predator or perceived threat\n\n- It was disoriented and crossed accidentally \n\n- It was following a herd of other kangaroos who were crossing\n\n- It wanted a change of scenery or environment \n\n- It was trying to reach a new habitat or territory\n\nThe real reason is unknown without more context, but hopefully one of those potential explanations does the joke justice! Let me know if you have any other animal jokes I can try to decipher." additional_kwargs={} example=False Fallback for Sequences[​](#fallback-for-sequences "Direct link to Fallback for Sequences") ------------------------------------------------------------------------------------------ We can also create fallbacks for sequences, that are sequences themselves. Here we do that with two different models: ChatOpenAI and then normal OpenAI (which does not use a chat model). Because OpenAI is NOT a chat model, you likely want a different prompt. 
# First let's create a chain with a ChatModel# We add in a string output parser here so the outputs between the two are the same typefrom langchain_core.output_parsers import StrOutputParserchat_prompt = ChatPromptTemplate.from_messages( [ ( "system", "You're a nice assistant who always includes a compliment in your response", ), ("human", "Why did the {animal} cross the road"), ])# Here we're going to use a bad model name to easily create a chain that will errorchat_model = ChatOpenAI(model="gpt-fake")bad_chain = chat_prompt | chat_model | StrOutputParser() **API Reference:**[StrOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.string.StrOutputParser.html) # Now lets create a chain with the normal OpenAI modelfrom langchain_core.prompts import PromptTemplatefrom langchain_openai import OpenAIprompt_template = """Instructions: You should always include a compliment in your response.Question: Why did the {animal} cross the road?"""prompt = PromptTemplate.from_template(prompt_template)llm = OpenAI()good_chain = prompt | llm **API Reference:**[PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) | [OpenAI](https://api.python.langchain.com/en/latest/llms/langchain_openai.llms.base.OpenAI.html) # We can now create a final chain which combines the twochain = bad_chain.with_fallbacks([good_chain])chain.invoke({"animal": "turtle"}) '\n\nAnswer: The turtle crossed the road to get to the other side, and I have to say he had some impressive determination.' Fallback for Long Inputs[​](#fallback-for-long-inputs "Direct link to Fallback for Long Inputs") ------------------------------------------------------------------------------------------------ One of the big limiting factors of LLMs is their context window. Usually, you can count and track the length of prompts before sending them to an LLM, but in situations where that is hard/complicated, you can fallback to a model with a longer context length. short_llm = ChatOpenAI()long_llm = ChatOpenAI(model="gpt-3.5-turbo-16k")llm = short_llm.with_fallbacks([long_llm]) inputs = "What is the next number: " + ", ".join(["one", "two"] * 3000) try: print(short_llm.invoke(inputs))except Exception as e: print(e) This model's maximum context length is 4097 tokens. However, your messages resulted in 12012 tokens. Please reduce the length of the messages. try: print(llm.invoke(inputs))except Exception as e: print(e) content='The next number in the sequence is two.' additional_kwargs={} example=False Fallback to Better Model[​](#fallback-to-better-model "Direct link to Fallback to Better Model") ------------------------------------------------------------------------------------------------ Often times we ask models to output format in a specific format (like JSON). Models like GPT-3.5 can do this okay, but sometimes struggle. This naturally points to fallbacks - we can try with GPT-3.5 (faster, cheaper), but then if parsing fails we can use GPT-4. 
from langchain.output_parsers import DatetimeOutputParser **API Reference:**[DatetimeOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.datetime.DatetimeOutputParser.html) prompt = ChatPromptTemplate.from_template( "what time was {event} (in %Y-%m-%dT%H:%M:%S.%fZ format - only return this value)") # In this case we are going to do the fallbacks on the LLM + output parser level# Because the error will get raised in the OutputParseropenai_35 = ChatOpenAI() | DatetimeOutputParser()openai_4 = ChatOpenAI(model="gpt-4") | DatetimeOutputParser() only_35 = prompt | openai_35fallback_4 = prompt | openai_35.with_fallbacks([openai_4]) try: print(only_35.invoke({"event": "the superbowl in 1994"}))except Exception as e: print(f"Error: {e}") Error: Could not parse datetime string: The Super Bowl in 1994 took place on January 30th at 3:30 PM local time. Converting this to the specified format (%Y-%m-%dT%H:%M:%S.%fZ) results in: 1994-01-30T15:30:00.000Z try: print(fallback_4.invoke({"event": "the superbowl in 1994"}))except Exception as e: print(f"Error: {e}") 1994-01-30 15:30:00
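A closing note not in the original page: `with_fallbacks` accepts a sequence, so you can stack several backups, and recent versions also take an `exceptions_to_handle` tuple to restrict which errors trigger them. A minimal sketch with illustrative model choices:

```python
# Minimal sketch: several fallbacks tried in order, only on rate-limit errors.
from langchain_anthropic import ChatAnthropic
from langchain_openai import ChatOpenAI
from openai import RateLimitError

primary = ChatOpenAI(model="gpt-3.5-turbo-0125", max_retries=0)
backups = [
    ChatAnthropic(model="claude-3-haiku-20240307"),
    ChatOpenAI(model="gpt-4", max_retries=0),
]
llm_with_fallbacks = primary.with_fallbacks(
    backups,
    exceptions_to_handle=(RateLimitError,),  # only fall back on rate limits
)
# llm_with_fallbacks.invoke("Why did the chicken cross the road?")
```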
https://python.langchain.com/v0.2/docs/how_to/filter_messages/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to filter messages On this page How to filter messages ====================== In more complex chains and agents we might track state with a list of messages. This list can start to accumulate messages from multiple different models, speakers, sub-chains, etc., and we may only want to pass subsets of this full list of messages to each model call in the chain/agent. The `filter_messages` utility makes it easy to filter messages by type, id, or name. Basic usage[​](#basic-usage "Direct link to Basic usage") --------------------------------------------------------- from langchain_core.messages import ( AIMessage, HumanMessage, SystemMessage, filter_messages,)messages = [ SystemMessage("you are a good assistant", id="1"), HumanMessage("example input", id="2", name="example_user"), AIMessage("example output", id="3", name="example_assistant"), HumanMessage("real input", id="4", name="bob"), AIMessage("real output", id="5", name="alice"),]filter_messages(messages, include_types="human") **API Reference:**[AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [SystemMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html) | [filter\_messages](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.filter_messages.html) [HumanMessage(content='example input', name='example_user', id='2'), HumanMessage(content='real input', name='bob', id='4')] filter_messages(messages, exclude_names=["example_user", "example_assistant"]) [SystemMessage(content='you are a good assistant', id='1'), HumanMessage(content='real input', name='bob', id='4'), AIMessage(content='real output', name='alice', id='5')] filter_messages(messages, include_types=[HumanMessage, AIMessage], exclude_ids=["3"]) [HumanMessage(content='example input', name='example_user', id='2'), HumanMessage(content='real input', name='bob', id='4'), AIMessage(content='real output', name='alice', id='5')] Chaining[​](#chaining "Direct link to Chaining") ------------------------------------------------ `filter_messages` can be used in an imperatively (like above) or declaratively, making it easy to compose with other components in a chain: # pip install -U langchain-anthropicfrom langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model="claude-3-sonnet-20240229", temperature=0)# Notice we don't pass in messages. 
This creates# a RunnableLambda that takes messages as inputfilter_ = filter_messages(exclude_names=["example_user", "example_assistant"])chain = filter_ | llmchain.invoke(messages) **API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) AIMessage(content=[], response_metadata={'id': 'msg_01Wz7gBHahAwkZ1KCBNtXmwA', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 3}}, id='run-b5d8a3fe-004f-4502-a071-a6c025031827-0', usage_metadata={'input_tokens': 16, 'output_tokens': 3, 'total_tokens': 19}) Looking at the LangSmith trace we can see that before the messages are passed to the model they are filtered: [https://smith.langchain.com/public/f808a724-e072-438e-9991-657cc9e7e253/r](https://smith.langchain.com/public/f808a724-e072-438e-9991-657cc9e7e253/r) Looking at just the filter\_, we can see that it's a Runnable object that can be invoked like all Runnables: filter_.invoke(messages) [HumanMessage(content='real input', name='bob', id='4'), AIMessage(content='real output', name='alice', id='5')] API reference[​](#api-reference "Direct link to API reference") --------------------------------------------------------------- For a complete description of all arguments head to the API reference: [https://api.python.langchain.com/en/latest/messages/langchain\_core.messages.utils.filter\_messages.html](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.filter_messages.html) [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/filter_messages.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to add fallbacks to a runnable ](/v0.2/docs/how_to/fallbacks/)[ Next Hybrid Search ](/v0.2/docs/how_to/hybrid/) * [Basic usage](#basic-usage) * [Chaining](#chaining) * [API reference](#api-reference)
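One more variant, not shown on the page (see the API reference above for the full argument list): the utility can also exclude by type, e.g. to drop system messages before a call. A minimal sketch, assuming the `messages` list from the basic-usage section:

```python
# Minimal sketch: keep everything except system messages.
from langchain_core.messages import SystemMessage, filter_messages

non_system = filter_messages(messages, exclude_types=[SystemMessage])
# e.g. [HumanMessage(...), AIMessage(...), HumanMessage(...), AIMessage(...)]
```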
https://python.langchain.com/v0.2/docs/how_to/indexing/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to use the LangChain indexing API On this page How to use the LangChain indexing API ===================================== Here, we will look at a basic indexing workflow using the LangChain indexing API. The indexing API lets you load and keep in sync documents from any source into a vector store. Specifically, it helps: * Avoid writing duplicated content into the vector store * Avoid re-writing unchanged content * Avoid re-computing embeddings over unchanged content All of which should save you time and money, as well as improve your vector search results. Crucially, the indexing API will work even with documents that have gone through several transformation steps (e.g., via text chunking) with respect to the original source documents. How it works[​](#how-it-works "Direct link to How it works") ------------------------------------------------------------ LangChain indexing makes use of a record manager (`RecordManager`) that keeps track of document writes into the vector store. When indexing content, hashes are computed for each document, and the following information is stored in the record manager: * the document hash (hash of both page content and metadata) * write time * the source id -- each document should include information in its metadata to allow us to determine the ultimate source of this document Deletion modes[​](#deletion-modes "Direct link to Deletion modes") ------------------------------------------------------------------ When indexing documents into a vector store, it's possible that some existing documents in the vector store should be deleted. In certain situations you may want to remove any existing documents that are derived from the same sources as the new documents being indexed. In others you may want to delete all existing documents wholesale. The indexing API deletion modes let you pick the behavior you want: Cleanup Mode De-Duplicates Content Parallelizable Cleans Up Deleted Source Docs Cleans Up Mutations of Source Docs and/or Derived Docs Clean Up Timing None ✅ ✅ ❌ ❌ \- Incremental ✅ ✅ ❌ ✅ Continuously Full ✅ ❌ ✅ ✅ At end of indexing `None` does not do any automatic clean up, allowing the user to manually do clean up of old content. `incremental` and `full` offer the following automated clean up: * If the content of the source document or derived documents has **changed**, both `incremental` or `full` modes will clean up (delete) previous versions of the content. * If the source document has been **deleted** (meaning it is not included in the documents currently being indexed), the `full` cleanup mode will delete it from the vector store correctly, but the `incremental` mode will not. When content is mutated (e.g., the source PDF file was revised) there will be a period of time during indexing when both the new and old versions may be returned to the user. This happens after the new content was written, but before the old version was deleted. * `incremental` indexing minimizes this period of time as it is able to do clean up continuously, as it writes. * `full` mode does the clean up after all batches have been written. Requirements[​](#requirements "Direct link to Requirements") ------------------------------------------------------------ 1. Do not use with a store that has been pre-populated with content independently of the indexing API, as the record manager will not know that records have been inserted previously. 2. 
Only works with LangChain `vectorstore`'s that support: * document addition by id (`add_documents` method with `ids` argument) * delete by id (`delete` method with `ids` argument) Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `AzureCosmosDBNoSqlVectorSearch`, `AzureCosmosDBVectorSearch`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`. Caution[​](#caution "Direct link to Caution") --------------------------------------------- The record manager relies on a time-based mechanism to determine what content can be cleaned up (when using `full` or `incremental` cleanup modes). If two tasks run back-to-back, and the first task finishes before the clock time changes, then the second task may not be able to clean up content. This is unlikely to be an issue in actual settings for the following reasons: 1. The RecordManager uses higher resolution timestamps. 2. The data would need to change between the first and the second tasks runs, which becomes unlikely if the time interval between the tasks is small. 3. Indexing tasks typically take more than a few ms. Quickstart[​](#quickstart "Direct link to Quickstart") ------------------------------------------------------ from langchain.indexes import SQLRecordManager, indexfrom langchain_core.documents import Documentfrom langchain_elasticsearch import ElasticsearchStorefrom langchain_openai import OpenAIEmbeddings **API Reference:**[SQLRecordManager](https://api.python.langchain.com/en/latest/indexes/langchain.indexes._sql_record_manager.SQLRecordManager.html) | [index](https://api.python.langchain.com/en/latest/indexing/langchain_core.indexing.api.index.html) | [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) | [ElasticsearchStore](https://api.python.langchain.com/en/latest/vectorstores/langchain_elasticsearch.vectorstores.ElasticsearchStore.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) Initialize a vector store and set up the embeddings: collection_name = "test_index"embedding = OpenAIEmbeddings()vectorstore = ElasticsearchStore( es_url="http://localhost:9200", index_name="test_index", embedding=embedding) Initialize a record manager with an appropriate namespace. **Suggestion:** Use a namespace that takes into account both the vector store and the collection name in the vector store; e.g., 'redis/my\_docs', 'chromadb/my\_docs' or 'postgres/my\_docs'. namespace = f"elasticsearch/{collection_name}"record_manager = SQLRecordManager( namespace, db_url="sqlite:///record_manager_cache.sql") Create a schema before using the record manager. record_manager.create_schema() Let's index some test documents: doc1 = Document(page_content="kitty", metadata={"source": "kitty.txt"})doc2 = Document(page_content="doggy", metadata={"source": "doggy.txt"}) Indexing into an empty vector store: def _clear(): """Hacky helper method to clear content. 
See the `full` mode section to to understand why it works.""" index([], record_manager, vectorstore, cleanup="full", source_id_key="source") ### `None` deletion mode[​](#none-deletion-mode "Direct link to none-deletion-mode") This mode does not do automatic clean up of old versions of content; however, it still takes care of content de-duplication. _clear() index( [doc1, doc1, doc1, doc1, doc1], record_manager, vectorstore, cleanup=None, source_id_key="source",) {'num_added': 1, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 0} _clear() index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source") {'num_added': 2, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 0} Second time around all content will be skipped: index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source") {'num_added': 0, 'num_updated': 0, 'num_skipped': 2, 'num_deleted': 0} ### `"incremental"` deletion mode[​](#incremental-deletion-mode "Direct link to incremental-deletion-mode") _clear() index( [doc1, doc2], record_manager, vectorstore, cleanup="incremental", source_id_key="source",) {'num_added': 2, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 0} Indexing again should result in both documents getting **skipped** -- also skipping the embedding operation! index( [doc1, doc2], record_manager, vectorstore, cleanup="incremental", source_id_key="source",) {'num_added': 0, 'num_updated': 0, 'num_skipped': 2, 'num_deleted': 0} If we provide no documents with incremental indexing mode, nothing will change. index([], record_manager, vectorstore, cleanup="incremental", source_id_key="source") {'num_added': 0, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 0} If we mutate a document, the new version will be written and all old versions sharing the same source will be deleted. changed_doc_2 = Document(page_content="puppy", metadata={"source": "doggy.txt"}) index( [changed_doc_2], record_manager, vectorstore, cleanup="incremental", source_id_key="source",) {'num_added': 1, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 1} ### `"full"` deletion mode[​](#full-deletion-mode "Direct link to full-deletion-mode") In `full` mode the user should pass the `full` universe of content that should be indexed into the indexing function. Any documents that are not passed into the indexing function and are present in the vectorstore will be deleted! This behavior is useful to handle deletions of source documents. _clear() all_docs = [doc1, doc2] index(all_docs, record_manager, vectorstore, cleanup="full", source_id_key="source") {'num_added': 2, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 0} Say someone deleted the first doc: del all_docs[0] all_docs [Document(page_content='doggy', metadata={'source': 'doggy.txt'})] Using full mode will clean up the deleted content as well. index(all_docs, record_manager, vectorstore, cleanup="full", source_id_key="source") {'num_added': 0, 'num_updated': 0, 'num_skipped': 1, 'num_deleted': 1} Source[​](#source "Direct link to Source") ------------------------------------------ The metadata attribute contains a field called `source`. This source should be pointing at the _ultimate_ provenance associated with the given document. For example, if these documents are representing chunks of some parent document, the `source` for both documents should be the same and reference the parent document. In general, `source` should always be specified. 
Only use a `None`, if you **never** intend to use `incremental` mode, and for some reason can't specify the `source` field correctly. from langchain_text_splitters import CharacterTextSplitter **API Reference:**[CharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.CharacterTextSplitter.html) doc1 = Document( page_content="kitty kitty kitty kitty kitty", metadata={"source": "kitty.txt"})doc2 = Document(page_content="doggy doggy the doggy", metadata={"source": "doggy.txt"}) new_docs = CharacterTextSplitter( separator="t", keep_separator=True, chunk_size=12, chunk_overlap=2).split_documents([doc1, doc2])new_docs [Document(page_content='kitty kit', metadata={'source': 'kitty.txt'}), Document(page_content='tty kitty ki', metadata={'source': 'kitty.txt'}), Document(page_content='tty kitty', metadata={'source': 'kitty.txt'}), Document(page_content='doggy doggy', metadata={'source': 'doggy.txt'}), Document(page_content='the doggy', metadata={'source': 'doggy.txt'})] _clear() index( new_docs, record_manager, vectorstore, cleanup="incremental", source_id_key="source",) {'num_added': 5, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 0} changed_doggy_docs = [ Document(page_content="woof woof", metadata={"source": "doggy.txt"}), Document(page_content="woof woof woof", metadata={"source": "doggy.txt"}),] This should delete the old versions of documents associated with `doggy.txt` source and replace them with the new versions. index( changed_doggy_docs, record_manager, vectorstore, cleanup="incremental", source_id_key="source",) {'num_added': 2, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 2} vectorstore.similarity_search("dog", k=30) [Document(page_content='woof woof', metadata={'source': 'doggy.txt'}), Document(page_content='woof woof woof', metadata={'source': 'doggy.txt'}), Document(page_content='tty kitty', metadata={'source': 'kitty.txt'}), Document(page_content='tty kitty ki', metadata={'source': 'kitty.txt'}), Document(page_content='kitty kit', metadata={'source': 'kitty.txt'})] Using with loaders[​](#using-with-loaders "Direct link to Using with loaders") ------------------------------------------------------------------------------ Indexing can accept either an iterable of documents or else any loader. **Attention:** The loader **must** set source keys correctly. 
from langchain_core.document_loaders import BaseLoaderclass MyCustomLoader(BaseLoader): def lazy_load(self): text_splitter = CharacterTextSplitter( separator="t", keep_separator=True, chunk_size=12, chunk_overlap=2 ) docs = [ Document(page_content="woof woof", metadata={"source": "doggy.txt"}), Document(page_content="woof woof woof", metadata={"source": "doggy.txt"}), ] yield from text_splitter.split_documents(docs) def load(self): return list(self.lazy_load()) **API Reference:**[BaseLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_core.document_loaders.base.BaseLoader.html) _clear() loader = MyCustomLoader() loader.load() [Document(page_content='woof woof', metadata={'source': 'doggy.txt'}), Document(page_content='woof woof woof', metadata={'source': 'doggy.txt'})] index(loader, record_manager, vectorstore, cleanup="full", source_id_key="source") {'num_added': 2, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 0} vectorstore.similarity_search("dog", k=30) [Document(page_content='woof woof', metadata={'source': 'doggy.txt'}), Document(page_content='woof woof woof', metadata={'source': 'doggy.txt'})] [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/indexing.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous Hybrid Search ](/v0.2/docs/how_to/hybrid/)[ Next How to inspect runnables ](/v0.2/docs/how_to/inspect/) * [How it works](#how-it-works) * [Deletion modes](#deletion-modes) * [Requirements](#requirements) * [Caution](#caution) * [Quickstart](#quickstart) * [`None` deletion mode](#none-deletion-mode) * [`"incremental"` deletion mode](#incremental-deletion-mode) * [`"full"` deletion mode](#full-deletion-mode) * [Source](#source) * [Using with loaders](#using-with-loaders)
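Putting the pieces above together, a typical deployment re-runs indexing on a schedule against the current universe of documents. Below is a minimal sketch under the same assumptions as the quickstart (a local Elasticsearch instance, OpenAI embeddings, and a SQLite-backed record manager); the `sync` function and its `docs` argument are illustrative, not part of the API:

```python
from langchain.indexes import SQLRecordManager, index
from langchain_elasticsearch import ElasticsearchStore
from langchain_openai import OpenAIEmbeddings


def sync(docs):
    """Re-index the current version of `docs`.

    Unchanged chunks are skipped; mutated chunks replace earlier versions
    of the same source thanks to incremental cleanup.
    """
    vectorstore = ElasticsearchStore(
        es_url="http://localhost:9200",
        index_name="test_index",
        embedding=OpenAIEmbeddings(),
    )
    record_manager = SQLRecordManager(
        "elasticsearch/test_index", db_url="sqlite:///record_manager_cache.sql"
    )
    record_manager.create_schema()  # safe to call on every run
    return index(
        docs,
        record_manager,
        vectorstore,
        cleanup="incremental",
        source_id_key="source",
    )
```

Switching `cleanup` to `"full"` would make the same function also delete vector store entries whose sources no longer appear in `docs`.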
https://python.langchain.com/v0.1/docs/get_started/introduction/
* [](/v0.1/) * Get started On this page Introduction ============ **LangChain** is a framework for developing applications powered by large language models (LLMs). LangChain simplifies every stage of the LLM application lifecycle: * **Development**: Build your applications using LangChain's open-source [building blocks](/v0.1/docs/expression_language/) and [components](/v0.1/docs/modules/). Hit the ground running using [third-party integrations](/v0.1/docs/integrations/platforms/) and [Templates](/v0.1/docs/templates/). * **Productionization**: Use [LangSmith](/v0.1/docs/langsmith/) to inspect, monitor and evaluate your chains, so that you can continuously optimize and deploy with confidence. * **Deployment**: Turn any chain into an API with [LangServe](/v0.1/docs/langserve/). ![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](/v0.1/svg/langchain_stack.svg "LangChain Framework Overview")![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](/v0.1/svg/langchain_stack_dark.svg "LangChain Framework Overview") Concretely, the framework consists of the following open-source libraries: * **`langchain-core`**: Base abstractions and LangChain Expression Language. * **`langchain-community`**: Third party integrations. * Partner packages (e.g. **`langchain-openai`**, **`langchain-anthropic`**, etc.): Some integrations have been further split into their own lightweight packages that only depend on **`langchain-core`**. * **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture. * **[langgraph](https://langchain-ai.github.io/langgraph/)**: Build robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. * **[langserve](/v0.1/docs/langserve/)**: Deploy LangChain chains as REST APIs. The broader ecosystem includes: * **[LangSmith](/v0.1/docs/langsmith/)**: A developer platform that lets you debug, test, evaluate, and monitor LLM applications and seamlessly integrates with LangChain. Get started[​](#get-started "Direct link to Get started") --------------------------------------------------------- We recommend following our [Quickstart](/v0.1/docs/get_started/quickstart/) guide to familiarize yourself with the framework by building your first LangChain application. [See here](/v0.1/docs/get_started/installation/) for instructions on how to install LangChain, set up your environment, and start building. note These docs focus on the Python LangChain library. [Head here](https://js.langchain.com) for docs on the JavaScript LangChain library. Use cases[​](#use-cases "Direct link to Use cases") --------------------------------------------------- If you're looking to build something specific or are more of a hands-on learner, check out our [use-cases](/v0.1/docs/use_cases/). They're walkthroughs and techniques for common end-to-end tasks, such as: * [Question answering with RAG](/v0.1/docs/use_cases/question_answering/) * [Extracting structured output](/v0.1/docs/use_cases/extraction/) * [Chatbots](/v0.1/docs/use_cases/chatbots/) * and more! Expression Language[​](#expression-language "Direct link to Expression Language") --------------------------------------------------------------------------------- LangChain Expression Language (LCEL) is the foundation of many of LangChain's components, and is a declarative way to compose chains. 
LCEL was designed from day 1 to support putting prototypes in production, with no code changes, from the simplest “prompt + LLM” chain to the most complex chains. * **[Get started](/v0.1/docs/expression_language/)**: LCEL and its benefits * **[Runnable interface](/v0.1/docs/expression_language/interface/)**: The standard interface for LCEL objects * **[Primitives](/v0.1/docs/expression_language/primitives/)**: More on the primitives LCEL includes * and more! Ecosystem[​](#ecosystem "Direct link to Ecosystem") --------------------------------------------------- ### [🦜🛠️ LangSmith](/v0.1/docs/langsmith/)[​](#️-langsmith "Direct link to ️-langsmith") Trace and evaluate your language model applications and intelligent agents to help you move from prototype to production. ### [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph/)[​](#️-langgraph "Direct link to ️-langgraph") Build stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain primitives. ### [🦜🏓 LangServe](/v0.1/docs/langserve/)[​](#-langserve "Direct link to -langserve") Deploy LangChain runnables and chains as REST APIs. [Security](/v0.1/docs/security/)[​](#security "Direct link to security") ------------------------------------------------------------------------ Read up on our [Security](/v0.1/docs/security/) best practices to make sure you're developing safely with LangChain. Additional resources[​](#additional-resources "Direct link to Additional resources") ------------------------------------------------------------------------------------ ### [Components](/v0.1/docs/modules/)[​](#components "Direct link to components") LangChain provides standard, extendable interfaces and integrations for many different components, including: ### [Integrations](/v0.1/docs/integrations/providers/)[​](#integrations "Direct link to integrations") LangChain is part of a rich ecosystem of tools that integrate with our framework and build on top of it. Check out our growing list of [integrations](/v0.1/docs/integrations/providers/). ### [Guides](/v0.1/docs/guides/)[​](#guides "Direct link to guides") Best practices for developing with LangChain. ### [API reference](https://api.python.langchain.com)[​](#api-reference "Direct link to api-reference") Head to the reference section for full documentation of all classes and methods in the LangChain and LangChain Experimental Python packages. ### [Contributing](/v0.1/docs/contributing/)[​](#contributing "Direct link to contributing") Check out the developer's guide for guidelines on contributing and help getting your dev environment set up. * * * #### Help us out by providing feedback on this documentation page: [ Next Introduction ](/v0.1/docs/get_started/introduction/) * [Get started](#get-started) * [Use cases](#use-cases) * [Expression Language](#expression-language) * [Ecosystem](#ecosystem) * [🦜🛠️ LangSmith](#️-langsmith) * [🦜🕸️ LangGraph](#️-langgraph) * [🦜🏓 LangServe](#-langserve) * [Security](#security) * [Additional resources](#additional-resources) * [Components](#components) * [Integrations](#integrations) * [Guides](#guides) * [API reference](#api-reference) * [Contributing](#contributing)
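For a feel of the "prompt + LLM" composition mentioned above, here is a minimal LCEL sketch, assuming `langchain-openai` is installed and `OPENAI_API_KEY` is set (the model choice is illustrative):

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

# Declarative composition with the | operator: prompt -> model -> output parser.
prompt = ChatPromptTemplate.from_template("Tell me a short joke about {topic}")
chain = prompt | ChatOpenAI(model="gpt-3.5-turbo") | StrOutputParser()

print(chain.invoke({"topic": "ice cream"}))  # the joke as a plain string
```

The same `chain` object also exposes `batch`, `stream`, and async variants without any extra code, which is the main payoff of composing chains declaratively.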
https://python.langchain.com/v0.2/docs/how_to/lcel_cheatsheet/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * LangChain Expression Language Cheatsheet On this page LangChain Expression Language Cheatsheet ======================================== This is a quick reference for all the most important LCEL primitives. For more advanced usage see the [LCEL how-to guides](/v0.2/docs/how_to/#langchain-expression-language-lcel) and the [full API reference](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html). ### Invoke a runnable[​](#invoke-a-runnable "Direct link to Invoke a runnable") #### [Runnable.invoke()](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.invoke) / [Runnable.ainvoke()](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.ainvoke)[​](#runnableinvoke--runnableainvoke "Direct link to runnableinvoke--runnableainvoke") from langchain_core.runnables import RunnableLambdarunnable = RunnableLambda(lambda x: str(x))runnable.invoke(5)# Async variant:# await runnable.ainvoke(5) **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) '5' ### Batch a runnable[​](#batch-a-runnable "Direct link to Batch a runnable") #### [Runnable.batch()](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.batch) / [Runnable.abatch()](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.abatch)[​](#runnablebatch--runnableabatch "Direct link to runnablebatch--runnableabatch") from langchain_core.runnables import RunnableLambdarunnable = RunnableLambda(lambda x: str(x))runnable.batch([7, 8, 9])# Async variant:# await runnable.abatch([7, 8, 9]) **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) ['7', '8', '9'] ### Stream a runnable[​](#stream-a-runnable "Direct link to Stream a runnable") #### [Runnable.stream()](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) / [Runnable.astream()](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream)[​](#runnablestream--runnableastream "Direct link to runnablestream--runnableastream") from langchain_core.runnables import RunnableLambdadef func(x): for y in x: yield str(y)runnable = RunnableLambda(func)for chunk in runnable.stream(range(5)): print(chunk)# Async variant:# async for chunk in await runnable.astream(range(5)):# print(chunk) **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) 01234 ### Compose runnables[​](#compose-runnables "Direct link to Compose runnables") #### Pipe operator `|`[​](#pipe-operator- "Direct link to pipe-operator-") from langchain_core.runnables import RunnableLambdarunnable1 = RunnableLambda(lambda x: {"foo": x})runnable2 = RunnableLambda(lambda x: [x] * 2)chain = runnable1 | runnable2chain.invoke(2) **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) [{'foo': 2}, {'foo': 2}] ### Invoke runnables in 
parallel[​](#invoke-runnables-in-parallel "Direct link to Invoke runnables in parallel") #### [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html)[​](#runnableparallel "Direct link to runnableparallel") from langchain_core.runnables import RunnableLambda, RunnableParallelrunnable1 = RunnableLambda(lambda x: {"foo": x})runnable2 = RunnableLambda(lambda x: [x] * 2)chain = RunnableParallel(first=runnable1, second=runnable2)chain.invoke(2) **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html) {'first': {'foo': 2}, 'second': [2, 2]} ### Turn any function into a runnable[​](#turn-any-function-into-a-runnable "Direct link to Turn any function into a runnable") #### [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html)[​](#runnablelambda "Direct link to runnablelambda") from langchain_core.runnables import RunnableLambdadef func(x): return x + 5runnable = RunnableLambda(func)runnable.invoke(2) **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) 7 ### Merge input and output dicts[​](#merge-input-and-output-dicts "Direct link to Merge input and output dicts") #### [RunnablePassthrough.assign](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html)[​](#runnablepassthroughassign "Direct link to runnablepassthroughassign") from langchain_core.runnables import RunnableLambda, RunnablePassthroughrunnable1 = RunnableLambda(lambda x: x["foo"] + 7)chain = RunnablePassthrough.assign(bar=runnable1)chain.invoke({"foo": 10}) **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) {'foo': 10, 'bar': 17} ### Include input dict in output dict[​](#include-input-dict-in-output-dict "Direct link to Include input dict in output dict") #### [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html)[​](#runnablepassthrough "Direct link to runnablepassthrough") from langchain_core.runnables import ( RunnableLambda, RunnableParallel, RunnablePassthrough,)runnable1 = RunnableLambda(lambda x: x["foo"] + 7)chain = RunnableParallel(bar=runnable1, baz=RunnablePassthrough())chain.invoke({"foo": 10}) **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) {'bar': 17, 'baz': {'foo': 10}} ### Add default invocation args[​](#add-default-invocation-args "Direct link to Add default invocation args") #### [Runnable.bind](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.bind)[​](#runnablebind "Direct link to runnablebind") 
from typing import Optionalfrom langchain_core.runnables import RunnableLambdadef func(main_arg: dict, other_arg: Optional[str] = None) -> dict: if other_arg: return {**main_arg, **{"foo": other_arg}} return main_argrunnable1 = RunnableLambda(func)bound_runnable1 = runnable1.bind(other_arg="bye")bound_runnable1.invoke({"bar": "hello"}) **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) {'bar': 'hello', 'foo': 'bye'} ### Add fallbacks[​](#add-fallbacks "Direct link to Add fallbacks") #### [Runnable.with\_fallbacks](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_fallbacks)[​](#runnablewith_fallbacks "Direct link to runnablewith_fallbacks") from langchain_core.runnables import RunnableLambdarunnable1 = RunnableLambda(lambda x: x + "foo")runnable2 = RunnableLambda(lambda x: str(x) + "foo")chain = runnable1.with_fallbacks([runnable2])chain.invoke(5) **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) '5foo' ### Add retries[​](#add-retries "Direct link to Add retries") #### [Runnable.with\_retry](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_retry)[​](#runnablewith_retry "Direct link to runnablewith_retry") from langchain_core.runnables import RunnableLambdacounter = -1def func(x): global counter counter += 1 print(f"attempt with {counter=}") return x / counterchain = RunnableLambda(func).with_retry(stop_after_attempt=2)chain.invoke(2) **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) attempt with counter=0attempt with counter=1 2.0 ### Configure runnable execution[​](#configure-runnable-execution "Direct link to Configure runnable execution") #### [RunnableConfig](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.config.RunnableConfig.html)[​](#runnableconfig "Direct link to runnableconfig") from langchain_core.runnables import RunnableLambda, RunnableParallelrunnable1 = RunnableLambda(lambda x: {"foo": x})runnable2 = RunnableLambda(lambda x: [x] * 2)runnable3 = RunnableLambda(lambda x: str(x))chain = RunnableParallel(first=runnable1, second=runnable2, third=runnable3)chain.invoke(7, config={"max_concurrency": 2}) **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html) {'first': {'foo': 7}, 'second': [7, 7], 'third': '7'} ### Add default config to runnable[​](#add-default-config-to-runnable "Direct link to Add default config to runnable") #### [Runnable.with\_config](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config)[​](#runnablewith_config "Direct link to runnablewith_config") from langchain_core.runnables import RunnableLambda, RunnableParallelrunnable1 = RunnableLambda(lambda x: {"foo": x})runnable2 = RunnableLambda(lambda x: [x] * 2)runnable3 = RunnableLambda(lambda x: str(x))chain = RunnableParallel(first=runnable1, second=runnable2, third=runnable3)configured_chain = 
chain.with_config(max_concurrency=2)chain.invoke(7) **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html) {'first': {'foo': 7}, 'second': [7, 7], 'third': '7'} ### Make runnable attributes configurable[​](#make-runnable-attributes-configurable "Direct link to Make runnable attributes configurable") #### [Runnable.with\_configurable\_fields](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableSerializable.html#langchain_core.runnables.base.RunnableSerializable.configurable_fields)[​](#runnablewith_configurable_fields "Direct link to runnablewith_configurable_fields") from typing import Any, Optionalfrom langchain_core.runnables import ( ConfigurableField, RunnableConfig, RunnableSerializable,)class FooRunnable(RunnableSerializable[dict, dict]): output_key: str def invoke( self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any ) -> list: return self._call_with_config(self.subtract_seven, input, config, **kwargs) def subtract_seven(self, input: dict) -> dict: return {self.output_key: input["foo"] - 7}runnable1 = FooRunnable(output_key="bar")configurable_runnable1 = runnable1.configurable_fields( output_key=ConfigurableField(id="output_key"))configurable_runnable1.invoke( {"foo": 10}, config={"configurable": {"output_key": "not bar"}}) **API Reference:**[ConfigurableField](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.utils.ConfigurableField.html) | [RunnableConfig](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.config.RunnableConfig.html) | [RunnableSerializable](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableSerializable.html) {'not bar': 3} configurable_runnable1.invoke({"foo": 10}) {'bar': 3} ### Make chain components configurable[​](#make-chain-components-configurable "Direct link to Make chain components configurable") #### [Runnable.with\_configurable\_alternatives](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableSerializable.html#langchain_core.runnables.base.RunnableSerializable.configurable_alternatives)[​](#runnablewith_configurable_alternatives "Direct link to runnablewith_configurable_alternatives") from typing import Any, Optionalfrom langchain_core.runnables import RunnableConfig, RunnableLambda, RunnableParallelclass ListRunnable(RunnableSerializable[Any, list]): def invoke( self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any ) -> list: return self._call_with_config(self.listify, input, config, **kwargs) def listify(self, input: Any) -> list: return [input]class StrRunnable(RunnableSerializable[Any, str]): def invoke( self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any ) -> list: return self._call_with_config(self.strify, input, config, **kwargs) def strify(self, input: Any) -> str: return str(input)runnable1 = RunnableLambda(lambda x: {"foo": x})configurable_runnable = ListRunnable().configurable_alternatives( ConfigurableField(id="second_step"), default_key="list", string=StrRunnable())chain = runnable1 | configurable_runnablechain.invoke(7, config={"configurable": {"second_step": "string"}}) **API 
Reference:**[RunnableConfig](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.config.RunnableConfig.html) | [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html) "{'foo': 7}" chain.invoke(7) [{'foo': 7}] ### Build a chain dynamically based on input[​](#build-a-chain-dynamically-based-on-input "Direct link to Build a chain dynamically based on input") from langchain_core.runnables import RunnableLambda, RunnableParallelrunnable1 = RunnableLambda(lambda x: {"foo": x})runnable2 = RunnableLambda(lambda x: [x] * 2)chain = RunnableLambda(lambda x: runnable1 if x > 6 else runnable2)chain.invoke(7) **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html) {'foo': 7} chain.invoke(5) [5, 5] ### Generate a stream of events[​](#generate-a-stream-of-events "Direct link to Generate a stream of events") #### [Runnable.astream\_events](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream_events)[​](#runnableastream_events "Direct link to runnableastream_events") # | echo: falseimport nest_asyncionest_asyncio.apply() from langchain_core.runnables import RunnableLambda, RunnableParallelrunnable1 = RunnableLambda(lambda x: {"foo": x}, name="first")async def func(x): for _ in range(5): yield xrunnable2 = RunnableLambda(func, name="second")chain = runnable1 | runnable2async for event in chain.astream_events("bar", version="v2"): print(f"event={event['event']} | name={event['name']} | data={event['data']}") **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html) event=on_chain_start | name=RunnableSequence | data={'input': 'bar'}event=on_chain_start | name=first | data={}event=on_chain_stream | name=first | data={'chunk': {'foo': 'bar'}}event=on_chain_start | name=second | data={}event=on_chain_end | name=first | data={'output': {'foo': 'bar'}, 'input': 'bar'}event=on_chain_stream | name=second | data={'chunk': {'foo': 'bar'}}event=on_chain_stream | name=RunnableSequence | data={'chunk': {'foo': 'bar'}}event=on_chain_stream | name=second | data={'chunk': {'foo': 'bar'}}event=on_chain_stream | name=RunnableSequence | data={'chunk': {'foo': 'bar'}}event=on_chain_stream | name=second | data={'chunk': {'foo': 'bar'}}event=on_chain_stream | name=RunnableSequence | data={'chunk': {'foo': 'bar'}}event=on_chain_stream | name=second | data={'chunk': {'foo': 'bar'}}event=on_chain_stream | name=RunnableSequence | data={'chunk': {'foo': 'bar'}}event=on_chain_stream | name=second | data={'chunk': {'foo': 'bar'}}event=on_chain_stream | name=RunnableSequence | data={'chunk': {'foo': 'bar'}}event=on_chain_end | name=second | data={'output': {'foo': 'bar'}, 'input': {'foo': 'bar'}}event=on_chain_end | name=RunnableSequence | data={'output': {'foo': 'bar'}} ### Yield batched outputs as they complete[​](#yield-batched-outputs-as-they-complete "Direct link to Yield batched outputs as they complete") #### 
[Runnable.batch\_as\_completed](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.batch_as_completed) / [Runnable.abatch\_as\_completed](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.abatch_as_completed)[​](#runnablebatch_as_completed--runnableabatch_as_completed "Direct link to runnablebatch_as_completed--runnableabatch_as_completed") import timefrom langchain_core.runnables import RunnableLambda, RunnableParallelrunnable1 = RunnableLambda(lambda x: time.sleep(x) or print(f"slept {x}"))for idx, result in runnable1.batch_as_completed([5, 1]): print(idx, result)# Async variant:# async for idx, result in runnable1.abatch_as_completed([5, 1]):# print(idx, result) **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html) slept 11 Noneslept 50 None ### Return subset of output dict[​](#return-subset-of-output-dict "Direct link to Return subset of output dict") #### [Runnable.pick](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.pick)[​](#runnablepick "Direct link to runnablepick") from langchain_core.runnables import RunnableLambda, RunnablePassthroughrunnable1 = RunnableLambda(lambda x: x["baz"] + 5)chain = RunnablePassthrough.assign(foo=runnable1).pick(["foo", "bar"])chain.invoke({"bar": "hi", "baz": 2}) **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) {'foo': 7, 'bar': 'hi'} ### Declaratively make a batched version of a runnable[​](#declaratively-make-a-batched-version-of-a-runnable "Direct link to Declaratively make a batched version of a runnable") #### [Runnable.map](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.map)[​](#runnablemap "Direct link to runnablemap") from langchain_core.runnables import RunnableLambdarunnable1 = RunnableLambda(lambda x: list(range(x)))runnable2 = RunnableLambda(lambda x: x + 5)chain = runnable1 | runnable2.map()chain.invoke(3) **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) [5, 6, 7] ### Get a graph representation of a runnable[​](#get-a-graph-representation-of-a-runnable "Direct link to Get a graph representation of a runnable") #### [Runnable.get\_graph](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.get_graph)[​](#runnableget_graph "Direct link to runnableget_graph") from langchain_core.runnables import RunnableLambda, RunnableParallelrunnable1 = RunnableLambda(lambda x: {"foo": x})runnable2 = RunnableLambda(lambda x: [x] * 2)runnable3 = RunnableLambda(lambda x: str(x))chain = runnable1 | RunnableParallel(second=runnable2, third=runnable3)chain.get_graph().print_ascii() **API 
Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html) +-------------+ | LambdaInput | +-------------+ * * * +------------------------------+ | Lambda(lambda x: {'foo': x}) | +------------------------------+ * * * +-----------------------------+ | Parallel<second,third>Input | +-----------------------------+ **** *** **** **** ** ** +---------------------------+ +--------------------------+ | Lambda(lambda x: [x] * 2) | | Lambda(lambda x: str(x)) | +---------------------------+ +--------------------------+ **** *** **** **** ** ** +------------------------------+ | Parallel<second,third>Output | +------------------------------+ ### Get all prompts in a chain[​](#get-all-prompts-in-a-chain "Direct link to Get all prompts in a chain") #### [Runnable.get\_prompts](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.get_prompts)[​](#runnableget_prompts "Direct link to runnableget_prompts") from langchain_core.prompts import ChatPromptTemplatefrom langchain_core.runnables import RunnableLambdaprompt1 = ChatPromptTemplate.from_messages( [("system", "good ai"), ("human", "{input}")])prompt2 = ChatPromptTemplate.from_messages( [ ("system", "really good ai"), ("human", "{input}"), ("ai", "{ai_output}"), ("human", "{input2}"), ])fake_llm = RunnableLambda(lambda prompt: "i am good ai")chain = prompt1.assign(ai_output=fake_llm) | prompt2 | fake_llmfor i, prompt in enumerate(chain.get_prompts()): print(f"**prompt {i=}**\n") print(prompt.pretty_repr()) print("\n" * 3) **API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) **prompt i=0**================================ System Message ================================good ai================================ Human Message ================================={input}**prompt i=1**================================ System Message ================================really good ai================================ Human Message ================================={input}================================== AI Message =================================={ai_output}================================ Human Message ================================={input2} ### Add lifecycle listeners[​](#add-lifecycle-listeners "Direct link to Add lifecycle listeners") #### [Runnable.with\_listeners](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_listeners)[​](#runnablewith_listeners "Direct link to runnablewith_listeners") import timefrom langchain_core.runnables import RunnableLambdafrom langchain_core.tracers.schemas import Rundef on_start(run_obj: Run): print("start_time:", run_obj.start_time)def on_end(run_obj: Run): print("end_time:", run_obj.end_time)runnable1 = RunnableLambda(lambda x: time.sleep(x))chain = runnable1.with_listeners(on_start=on_start, on_end=on_end)chain.invoke(2) **API Reference:**[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) | 
[Run](https://api.python.langchain.com/en/latest/tracers/langchain_core.tracers.schemas.Run.html) start_time: 2024-05-17 23:04:00.951065+00:00end_time: 2024-05-17 23:04:02.958765+00:00 [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/lcel_cheatsheet.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to inspect runnables ](/v0.2/docs/how_to/inspect/)[ Next How to cache LLM responses ](/v0.2/docs/how_to/llm_caching/) * [Invoke a runnable](#invoke-a-runnable) * [Batch a runnable](#batch-a-runnable) * [Stream a runnable](#stream-a-runnable) * [Compose runnables](#compose-runnables) * [Invoke runnables in parallel](#invoke-runnables-in-parallel) * [Turn any function into a runnable](#turn-any-function-into-a-runnable) * [Merge input and output dicts](#merge-input-and-output-dicts) * [Include input dict in output dict](#include-input-dict-in-output-dict) * [Add default invocation args](#add-default-invocation-args) * [Add fallbacks](#add-fallbacks) * [Add retries](#add-retries) * [Configure runnable execution](#configure-runnable-execution) * [Add default config to runnable](#add-default-config-to-runnable) * [Make runnable attributes configurable](#make-runnable-attributes-configurable) * [Make chain components configurable](#make-chain-components-configurable) * [Build a chain dynamically based on input](#build-a-chain-dynamically-based-on-input) * [Generate a stream of events](#generate-a-stream-of-events) * [Yield batched outputs as they complete](#yield-batched-outputs-as-they-complete) * [Return subset of output dict](#return-subset-of-output-dict) * [Declaratively make a batched version of a runnable](#declaratively-make-a-batched-version-of-a-runnable) * [Get a graph representation of a runnable](#get-a-graph-representation-of-a-runnable) * [Get all prompts in a chain](#get-all-prompts-in-a-chain) * [Add lifecycle listeners](#add-lifecycle-listeners)
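The primitives above all return new runnables, so they can be stacked. A small illustrative sketch (not taken from the cheatsheet) combining composition, retries, and fallbacks:

```python
from langchain_core.runnables import RunnableLambda

# A step that could raise for some inputs.
flaky = RunnableLambda(lambda x: x["foo"] + 1)

chain = (
    RunnableLambda(lambda x: {"foo": x})
    | flaky.with_retry(stop_after_attempt=3).with_fallbacks(
        [RunnableLambda(lambda x: -1)]  # last resort if retries are exhausted
    )
)

chain.invoke(2)
# -> 3 (the fallback would only return -1 if `flaky` kept failing)
```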
https://python.langchain.com/v0.2/docs/how_to/llm_token_usage_tracking/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to track token usage for LLMs On this page How to track token usage for LLMs ================================= Tracking token usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls. Prerequisites This guide assumes familiarity with the following concepts: * [LLMs](/v0.2/docs/concepts/#llms) Using LangSmith[​](#using-langsmith "Direct link to Using LangSmith") --------------------------------------------------------------------- You can use [LangSmith](https://www.langchain.com/langsmith) to help track token usage in your LLM application. See the [LangSmith quick start guide](https://docs.smith.langchain.com/). Using callbacks[​](#using-callbacks "Direct link to Using callbacks") --------------------------------------------------------------------- There are some API-specific callback context managers that allow you to track token usage across multiple calls. You'll need to check whether such an integration is available for your particular model. If such an integration is not available for your model, you can create a custom callback manager by adapting the implementation of the [OpenAI callback manager](https://api.python.langchain.com/en/latest/_modules/langchain_community/callbacks/openai_info.html#OpenAICallbackHandler). ### OpenAI[​](#openai "Direct link to OpenAI") Let's first look at an extremely simple example of tracking token usage for a single Chat model call. danger The callback handler does not currently support streaming token counts for legacy language models (e.g., `langchain_openai.OpenAI`). For support in a streaming context, refer to the corresponding guide for chat models [here](/v0.2/docs/how_to/chat_token_usage_tracking/). ### Single call[​](#single-call "Direct link to Single call") from langchain_community.callbacks import get_openai_callbackfrom langchain_openai import OpenAIllm = OpenAI(model_name="gpt-3.5-turbo-instruct")with get_openai_callback() as cb: result = llm.invoke("Tell me a joke") print(result) print("---")print()print(f"Total Tokens: {cb.total_tokens}")print(f"Prompt Tokens: {cb.prompt_tokens}")print(f"Completion Tokens: {cb.completion_tokens}")print(f"Total Cost (USD): ${cb.total_cost}") **API Reference:**[get\_openai\_callback](https://api.python.langchain.com/en/latest/callbacks/langchain_community.callbacks.manager.get_openai_callback.html) | [OpenAI](https://api.python.langchain.com/en/latest/llms/langchain_openai.llms.base.OpenAI.html) Why don't scientists trust atoms?Because they make up everything.---Total Tokens: 18Prompt Tokens: 4Completion Tokens: 14Total Cost (USD): $3.4e-05 ### Multiple calls[​](#multiple-calls "Direct link to Multiple calls") Anything inside the context manager will get tracked. Here's an example of using it to track multiple calls in sequence to a chain. This will also work for an agent which may use multiple steps. 
from langchain_community.callbacks import get_openai_callbackfrom langchain_core.prompts import PromptTemplatefrom langchain_openai import OpenAIllm = OpenAI(model_name="gpt-3.5-turbo-instruct")template = PromptTemplate.from_template("Tell me a joke about {topic}")chain = template | llmwith get_openai_callback() as cb: response = chain.invoke({"topic": "birds"}) print(response) response = chain.invoke({"topic": "fish"}) print("--") print(response)print()print("---")print(f"Total Tokens: {cb.total_tokens}")print(f"Prompt Tokens: {cb.prompt_tokens}")print(f"Completion Tokens: {cb.completion_tokens}")print(f"Total Cost (USD): ${cb.total_cost}") **API Reference:**[get\_openai\_callback](https://api.python.langchain.com/en/latest/callbacks/langchain_community.callbacks.manager.get_openai_callback.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) | [OpenAI](https://api.python.langchain.com/en/latest/llms/langchain_openai.llms.base.OpenAI.html) Why did the chicken go to the seance?To talk to the other side of the road!--Why did the fish need a lawyer?Because it got caught in a net!---Total Tokens: 50Prompt Tokens: 12Completion Tokens: 38Total Cost (USD): $9.400000000000001e-05 Streaming[​](#streaming "Direct link to Streaming") --------------------------------------------------- danger `get_openai_callback` does not currently support streaming token counts for legacy language models (e.g., `langchain_openai.OpenAI`). If you want to count tokens correctly in a streaming context, there are a number of options: * Use chat models as described in [this guide](/v0.2/docs/how_to/chat_token_usage_tracking/); * Implement a [custom callback handler](/v0.2/docs/how_to/custom_callbacks/) that uses appropriate tokenizers to count the tokens; * Use a monitoring platform such as [LangSmith](https://www.langchain.com/langsmith). Note that when using legacy language models in a streaming context, token counts are not updated: from langchain_community.callbacks import get_openai_callbackfrom langchain_openai import OpenAIllm = OpenAI(model_name="gpt-3.5-turbo-instruct")with get_openai_callback() as cb: for chunk in llm.stream("Tell me a joke"): print(chunk, end="", flush=True) print(result) print("---")print()print(f"Total Tokens: {cb.total_tokens}")print(f"Prompt Tokens: {cb.prompt_tokens}")print(f"Completion Tokens: {cb.completion_tokens}")print(f"Total Cost (USD): ${cb.total_cost}") **API Reference:**[get\_openai\_callback](https://api.python.langchain.com/en/latest/callbacks/langchain_community.callbacks.manager.get_openai_callback.html) | [OpenAI](https://api.python.langchain.com/en/latest/llms/langchain_openai.llms.base.OpenAI.html) Why don't scientists trust atoms?Because they make up everything!Why don't scientists trust atoms?Because they make up everything.---Total Tokens: 0Prompt Tokens: 0Completion Tokens: 0Total Cost (USD): $0.0 [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/llm_token_usage_tracking.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). 
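As a starting point for the custom-callback route mentioned above, here is a minimal sketch of a handler that accumulates the usage numbers a provider reports in `llm_output`. The `token_usage` keys shown are what OpenAI returns for non-streaming calls; other providers may use different keys, or report nothing at all:

```python
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult
from langchain_openai import OpenAI


class TokenUsageHandler(BaseCallbackHandler):
    """Accumulate token counts reported by the provider on each LLM call."""

    def __init__(self) -> None:
        self.prompt_tokens = 0
        self.completion_tokens = 0

    def on_llm_end(self, response: LLMResult, **kwargs) -> None:
        usage = (response.llm_output or {}).get("token_usage", {})
        self.prompt_tokens += usage.get("prompt_tokens", 0)
        self.completion_tokens += usage.get("completion_tokens", 0)


handler = TokenUsageHandler()
llm = OpenAI(model_name="gpt-3.5-turbo-instruct")
llm.invoke("Tell me a joke", config={"callbacks": [handler]})
print(f"Prompt tokens: {handler.prompt_tokens}, Completion tokens: {handler.completion_tokens}")
```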
https://python.langchain.com/v0.2/docs/contributing/
* [](/v0.2/) * Contributing * Welcome Contributors On this page Welcome Contributors ==================== Hi there! Thank you for even being interested in contributing to LangChain. As an open-source project in a rapidly developing field, we are extremely open to contributions, whether they involve new features, improved infrastructure, better documentation, or bug fixes. 🗺️ Guidelines[​](#️-guidelines "Direct link to 🗺️ Guidelines") ---------------------------------------------------------------- ### 👩‍💻 Ways to contribute[​](#-ways-to-contribute "Direct link to 👩‍💻 Ways to contribute") There are many ways to contribute to LangChain. Here are some common ways people contribute: * [**Documentation**](/v0.2/docs/contributing/documentation/style_guide/): Help improve our docs, including this one! * [**Code**](/v0.2/docs/contributing/code/): Help us write code, fix bugs, or improve our infrastructure. * [**Integrations**](/v0.2/docs/contributing/integrations/): Help us integrate with your favorite vendors and tools. * [**Discussions**](https://github.com/langchain-ai/langchain/discussions): Help answer usage questions and discuss issues with users. ### 🚩 GitHub Issues[​](#-github-issues "Direct link to 🚩 GitHub Issues") Our [issues](https://github.com/langchain-ai/langchain/issues) page is kept up to date with bugs, improvements, and feature requests. There is a taxonomy of labels to help with sorting and discovery of issues of interest. Please use these to help organize issues. If you start working on an issue, please assign it to yourself. If you are adding an issue, please try to keep it focused on a single, modular bug/improvement/feature. If two issues are related, or blocking, please link them rather than combining them. We will try to keep these issues as up-to-date as possible, though with the rapid rate of development in this field some may get out of date. If you notice this happening, please let us know. ### 💭 GitHub Discussions[​](#-github-discussions "Direct link to 💭 GitHub Discussions") We have a [discussions](https://github.com/langchain-ai/langchain/discussions) page where users can ask usage questions, discuss design decisions, and propose new features. If you are able to help answer questions, please do so! This will allow the maintainers to spend more time focused on development and bug fixing. ### 🙋 Getting Help[​](#-getting-help "Direct link to 🙋 Getting Help") Our goal is to have the simplest developer setup possible. Should you experience any difficulty getting setup, please contact a maintainer! Not only do we want to help get you unblocked, but we also want to make sure that the process is smooth for future contributors. In a similar vein, we do enforce certain linting, formatting, and documentation standards in the codebase. If you are finding these difficult (or even just annoying) to work with, feel free to contact a maintainer for help - we do not want these to get in the way of getting good code into the codebase. ### 🌟 Recognition[​](#-recognition "Direct link to 🌟 Recognition") If your contribution has made its way into a release, we will want to give you credit on Twitter (only if you want though)! If you have a Twitter account you would like us to mention, please let us know in the PR or through another means. [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/contributing/index.mdx) * * * #### Was this page helpful? 
https://python.langchain.com/v0.2/docs/additional_resources/tutorials/
On this page 3rd Party Tutorials =================== Tutorials[​](#tutorials "Direct link to Tutorials") --------------------------------------------------- ### [LangChain v 0.1 by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae0gBSJ9T0w7cu7iJZbH3T31)[​](#langchain-v-01-by-langchainai "Direct link to langchain-v-01-by-langchainai") ### [Build with Langchain - Advanced by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae06tclDATrMYY0idsTdLg9v)[​](#build-with-langchain---advanced-by-langchainai "Direct link to build-with-langchain---advanced-by-langchainai") ### [LangGraph by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae16n2TWUkKq5PgJ0w6Pkwtg)[​](#langgraph-by-langchainai "Direct link to langgraph-by-langchainai") ### [by Greg Kamradt](https://www.youtube.com/playlist?list=PLqZXAkvF1bPNQER9mLmDbntNfSpzdDIU5)[​](#by-greg-kamradt "Direct link to by-greg-kamradt") ### [by Sam Witteveen](https://www.youtube.com/playlist?list=PL8motc6AQftk1Bs42EW45kwYbyJ4jOdiZ)[​](#by-sam-witteveen "Direct link to by-sam-witteveen") ### [by James Briggs](https://www.youtube.com/playlist?list=PLIUOU7oqGTLieV9uTIFMm6_4PXg-hlN6F)[​](#by-james-briggs "Direct link to by-james-briggs") ### [by Prompt Engineering](https://www.youtube.com/playlist?list=PLVEEucA9MYhOu89CX8H3MBZqayTbcCTMr)[​](#by-prompt-engineering "Direct link to by-prompt-engineering") ### [by Mayo Oshin](https://www.youtube.com/@chatwithdata/search?query=langchain)[​](#by-mayo-oshin "Direct link to by-mayo-oshin") ### [by 1 little Coder](https://www.youtube.com/playlist?list=PLpdmBGJ6ELUK-v0MK-t4wZmVEbxM5xk6L)[​](#by-1-little-coder "Direct link to by-1-little-coder") ### [by BobLin (Chinese language)](https://www.youtube.com/playlist?list=PLbd7ntv6PxC3QMFQvtWfk55p-Op_syO1C)[​](#by-boblin-chinese-language "Direct link to by-boblin-chinese-language") Courses[​](#courses "Direct link to Courses") --------------------------------------------- ### Featured courses on Deeplearning.AI[​](#featured-courses-on-deeplearningai "Direct link to Featured courses on Deeplearning.AI") * [LangChain for LLM Application Development](https://www.deeplearning.ai/short-courses/langchain-for-llm-application-development/) * [LangChain Chat with Your Data](https://www.deeplearning.ai/short-courses/langchain-chat-with-your-data/) * [Functions, Tools and Agents with LangChain](https://www.deeplearning.ai/short-courses/functions-tools-agents-langchain/) * [Build LLM Apps with LangChain.js](https://www.deeplearning.ai/short-courses/build-llm-apps-with-langchain-js/) ### Online courses[​](#online-courses "Direct link to Online courses") * [Udemy](https://www.udemy.com/courses/search/?q=langchain) * [DataCamp](https://www.datacamp.com/courses/developing-llm-applications-with-langchain) * [Pluralsight](https://www.pluralsight.com/search?q=langchain) * [Coursera](https://www.coursera.org/search?query=langchain) * [Maven](https://maven.com/courses?query=langchain) * [Udacity](https://www.udacity.com/catalog/all/any-price/any-school/any-skill/any-difficulty/any-duration/any-type/relevance/page-1?searchValue=langchain) * [LinkedIn Learning](https://www.linkedin.com/search/results/learning/?keywords=langchain) * [edX](https://www.edx.org/search?q=langchain) * [freeCodeCamp](https://www.youtube.com/@freecodecamp/search?query=langchain) Short Tutorials[​](#short-tutorials "Direct link to Short Tutorials") --------------------------------------------------------------------- * [by Nicholas Renotte](https://youtu.be/MlK6SIjcjE8) * [by Patrick 
Loeber](https://youtu.be/LbT1yp6quS8) * [by Rabbitmetrics](https://youtu.be/aywZrzNaKjs) * [by Ivan Reznikov](https://medium.com/@ivanreznikov/langchain-101-course-updated-668f7b41d6cb) Books and Handbooks[​](#books-and-handbooks "Direct link to Books and Handbooks") --------------------------------------------------------------------------------- * [Generative AI with LangChain](https://www.amazon.com/Generative-AI-LangChain-language-ChatGPT/dp/1835083463/ref=sr_1_1?crid=1GMOMH0G7GLR&keywords=generative+ai+with+langchain&qid=1703247181&sprefix=%2Caps%2C298&sr=8-1) by [Ben Auffrath](https://www.amazon.com/stores/Ben-Auffarth/author/B08JQKSZ7D?ref=ap_rdr&store_ref=ap_rdr&isDramIntegrated=true&shoppingPortalEnabled=true), ©️ 2023 Packt Publishing * [LangChain AI Handbook](https://www.pinecone.io/learn/langchain/) By **James Briggs** and **Francisco Ingham** * [LangChain Cheatsheet](https://pub.towardsai.net/langchain-cheatsheet-all-secrets-on-a-single-page-8be26b721cde) by **Ivan Reznikov** * [Dive into Langchain (Chinese language)](https://langchain.boblin.app/) * * * [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/additional_resources/tutorials.mdx) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). * [Tutorials](#tutorials) * [LangChain v 0.1 by LangChain.ai](#langchain-v-01-by-langchainai) * [Build with Langchain - Advanced by LangChain.ai](#build-with-langchain---advanced-by-langchainai) * [LangGraph by LangChain.ai](#langgraph-by-langchainai) * [by Greg Kamradt](#by-greg-kamradt) * [by Sam Witteveen](#by-sam-witteveen) * [by James Briggs](#by-james-briggs) * [by Prompt Engineering](#by-prompt-engineering) * [by Mayo Oshin](#by-mayo-oshin) * [by 1 little Coder](#by-1-little-coder) * [by BobLin (Chinese language)](#by-boblin-chinese-language) * [Courses](#courses) * [Featured courses on Deeplearning.AI](#featured-courses-on-deeplearningai) * [Online courses](#online-courses) * [Short Tutorials](#short-tutorials) * [Books and Handbooks](#books-and-handbooks)
https://python.langchain.com/v0.2/docs/templates/
Templates
=========

Highlighting a few different categories of templates.

⭐ Popular
----------

These are some of the more popular templates to get started with.

* [Retrieval Augmented Generation Chatbot](/v0.2/docs/templates/rag-conversation/): Build a chatbot over your data. Defaults to OpenAI and PineconeVectorStore.
* [Extraction with OpenAI Functions](/v0.2/docs/templates/extraction-openai-functions/): Extract structured data from unstructured data. Uses OpenAI function calling.
* [Local Retrieval Augmented Generation](/v0.2/docs/templates/rag-chroma-private/): Build a chatbot over your data. Uses only local tooling: Ollama, GPT4all, Chroma.
* [OpenAI Functions Agent](/v0.2/docs/templates/openai-functions-agent/): Build a chatbot that can take actions. Uses OpenAI function calling and Tavily.
* [XML Agent](/v0.2/docs/templates/xml-agent/): Build a chatbot that can take actions. Uses Anthropic and You.com.
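As a concrete illustration of the retrieval-augmented generation pattern these popular templates package up, here is a minimal LCEL sketch. It is not the code of any particular template: the keyword-matching `retrieve` function is a stand-in for a real vector-store retriever such as Pinecone or Chroma, and the snippet assumes `langchain-openai` is installed and `OPENAI_API_KEY` is set.

```python
# Minimal sketch of a RAG chat chain with LCEL (illustrative, not a template's source).
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI

DOCS = [
    "LangChain templates are reference architectures you can drop into an app.",
    "rag-conversation builds a chatbot over your own data using a vector store.",
]


def retrieve(question: str) -> str:
    # Stand-in retriever: return any document sharing a word with the question.
    words = set(question.lower().split())
    hits = [d for d in DOCS if words & set(d.lower().split())]
    return "\n".join(hits) or "\n".join(DOCS)


prompt = ChatPromptTemplate.from_template(
    "Answer the question using only this context:\n{context}\n\nQuestion: {question}"
)

chain = (
    {"context": RunnableLambda(retrieve), "question": RunnablePassthrough()}
    | prompt
    | ChatOpenAI(model="gpt-3.5-turbo")  # any chat model would do here
    | StrOutputParser()
)

print(chain.invoke("What does the rag-conversation template do?"))
```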
📥 Advanced Retrieval
---------------------

These templates cover advanced retrieval techniques, which can be used for chat and QA over databases or documents.

* [Reranking](/v0.2/docs/templates/rag-pinecone-rerank/): This retrieval technique uses Cohere's reranking endpoint to rerank documents from an initial retrieval step.
* [Anthropic Iterative Search](/v0.2/docs/templates/anthropic-iterative-search/): This retrieval technique uses iterative prompting to determine what to retrieve and whether the retrieved documents are good enough.
* **Parent Document Retrieval** using [Neo4j](/v0.2/docs/templates/neo4j-parent/) or [MongoDB](/v0.2/docs/templates/mongo-parent-document-retrieval/): This retrieval technique stores embeddings for smaller chunks, but then returns larger chunks to pass to the model for generation.
* [Semi-Structured RAG](/v0.2/docs/templates/rag-semi-structured/): The template shows how to do retrieval over semi-structured data (e.g. data that involves both text and tables).
* [Temporal RAG](/v0.2/docs/templates/rag-timescale-hybrid-search-time/): The template shows how to do hybrid search over data with a time-based component using [Timescale Vector](https://www.timescale.com/ai?utm_campaign=vectorlaunch&utm_source=langchain&utm_medium=referral).

🔍 Advanced Retrieval - Query Transformation
--------------------------------------------

A selection of advanced retrieval methods that involve transforming the original user query, which can improve retrieval quality.

* [Hypothetical Document Embeddings](/v0.2/docs/templates/hyde/): A retrieval technique that generates a hypothetical document for a given query, and then uses the embedding of that document to do semantic search. [Paper](https://arxiv.org/abs/2212.10496).
* [Rewrite-Retrieve-Read](/v0.2/docs/templates/rewrite-retrieve-read/): A retrieval technique that rewrites a given query before passing it to a search engine. [Paper](https://arxiv.org/abs/2305.14283).
* [Step-back QA Prompting](/v0.2/docs/templates/stepback-qa-prompting/): A retrieval technique that generates a "step-back" question and then retrieves documents relevant to both that question and the original question. [Paper](https://arxiv.org/abs/2310.06117).
* [RAG-Fusion](/v0.2/docs/templates/rag-fusion/): A retrieval technique that generates multiple queries and then reranks the retrieved documents using reciprocal rank fusion. [Article](https://towardsdatascience.com/forget-rag-the-future-is-rag-fusion-1147298d8ad1).
* [Multi-Query Retriever](/v0.2/docs/templates/rag-pinecone-multi-query/): This retrieval technique uses an LLM to generate multiple queries and then fetches documents for all queries.
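Because several of these templates (RAG-Fusion, Multi-Query Retriever) merge results retrieved for multiple generated queries, a plain-Python sketch of reciprocal rank fusion may help. It illustrates the general technique, not the templates' own code, and the document IDs are invented.

```python
# Reciprocal rank fusion (RRF): merge several ranked result lists into one ranking.
from collections import defaultdict


def reciprocal_rank_fusion(result_lists: list[list[str]], k: int = 60) -> list[str]:
    """Each document's score is the sum of 1 / (k + rank) over every list it appears in."""
    scores: dict[str, float] = defaultdict(float)
    for results in result_lists:
        for rank, doc_id in enumerate(results, start=1):
            scores[doc_id] += 1.0 / (k + rank)
    return sorted(scores, key=scores.get, reverse=True)


# Hypothetical results retrieved for three LLM-generated variants of one user question.
runs = [
    ["doc_a", "doc_b", "doc_c"],
    ["doc_b", "doc_d"],
    ["doc_b", "doc_a", "doc_e"],
]
print(reciprocal_rank_fusion(runs))  # doc_b ranks first: it appears near the top of every list
```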
🧠 Advanced Retrieval - Query Construction
------------------------------------------

A selection of advanced retrieval methods that involve constructing a query in a separate DSL from natural language, which enable natural language chat over various structured databases.

* [Elastic Query Generator](/v0.2/docs/templates/elastic-query-generator/): Generate elastic search queries from natural language.
* [Neo4j Cypher Generation](/v0.2/docs/templates/neo4j-cypher/): Generate cypher statements from natural language. Available with a ["full text" option](/v0.2/docs/templates/neo4j-cypher-ft/) as well.
* [Supabase Self Query](/v0.2/docs/templates/self-query-supabase/): Parse a natural language query into a semantic query as well as a metadata filter for Supabase.

🦙 OSS Models
-------------

These templates use OSS models, which enable privacy for sensitive data.

* [Local Retrieval Augmented Generation](/v0.2/docs/templates/rag-chroma-private/): Build a chatbot over your data. Uses only local tooling: Ollama, GPT4all, Chroma.
* [SQL Question Answering (Replicate)](/v0.2/docs/templates/sql-llama2/): Question answering over a SQL database, using Llama2 hosted on [Replicate](https://replicate.com/).
* [SQL Question Answering (LlamaCpp)](/v0.2/docs/templates/sql-llamacpp/): Question answering over a SQL database, using Llama2 through [LlamaCpp](https://github.com/ggerganov/llama.cpp).
* [SQL Question Answering (Ollama)](/v0.2/docs/templates/sql-ollama/): Question answering over a SQL database, using Llama2 through [Ollama](https://github.com/jmorganca/ollama).

⛏️ Extraction
-------------

These templates extract data in a structured format based upon a user-specified schema (a minimal sketch of this pattern appears at the end of this page).

* [Extraction Using OpenAI Functions](/v0.2/docs/templates/extraction-openai-functions/): Extract information from text using OpenAI Function Calling.
* [Extraction Using Anthropic Functions](/v0.2/docs/templates/extraction-anthropic-functions/): Extract information from text using a LangChain wrapper around the Anthropic endpoints intended to simulate function calling.
* [Extract BioTech Plate Data](/v0.2/docs/templates/plate-chain/): Extract microplate data from messy Excel spreadsheets into a more normalized format.

⛏️ Summarization and tagging
----------------------------

These templates summarize or categorize documents and text.

* [Summarization using Anthropic](/v0.2/docs/templates/summarize-anthropic/): Uses Anthropic's Claude2 to summarize long documents.

🤖 Agents
---------

These templates build chatbots that can take actions, helping to automate tasks.

* [OpenAI Functions Agent](/v0.2/docs/templates/openai-functions-agent/): Build a chatbot that can take actions. Uses OpenAI function calling and Tavily.
* [XML Agent](/v0.2/docs/templates/xml-agent/): Build a chatbot that can take actions. Uses Anthropic and You.com.

🚨 Safety and evaluation
------------------------

These templates enable moderation or evaluation of LLM outputs.

* [Guardrails Output Parser](/v0.2/docs/templates/guardrails-output-parser/): Use guardrails-ai to validate LLM output.
* [Chatbot Feedback](/v0.2/docs/templates/chat-bot-feedback/): Use LangSmith to evaluate chatbot responses.
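Returning to the ⛏️ Extraction templates above, here is a minimal sketch of schema-driven extraction using `ChatOpenAI.with_structured_output`, which binds a schema through OpenAI tool/function calling. The `Person` schema is invented for the example, and the snippet assumes a recent `langchain-openai` with `OPENAI_API_KEY` configured; the actual templates define their own schemas and chains.

```python
# Minimal sketch of schema-driven extraction via OpenAI function calling.
from typing import Optional

from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field


class Person(BaseModel):
    """A person mentioned in the text."""

    name: str = Field(description="Full name of the person")
    role: Optional[str] = Field(default=None, description="Role or job title, if stated")


llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
structured_llm = llm.with_structured_output(Person)  # binds Person as the output schema

result = structured_llm.invoke(
    "Ada Lovelace worked as an analyst on the Analytical Engine."
)
print(result)  # e.g. Person(name='Ada Lovelace', role='analyst')
```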
https://python.langchain.com/v0.2/docs/people/
People ====== There are some incredible humans from all over the world who have been instrumental in helping the LangChain community flourish 🌐! This page highlights a few of those folks who have dedicated their time to the open-source repo in the form of direct contributions and reviews. Top reviewers[​](#top-reviewers "Direct link to Top reviewers") --------------------------------------------------------------- As LangChain has grown, the amount of surface area that maintainers cover has grown as well. Thank you to the following folks who have gone above and beyond in reviewing incoming PRs 🙏! [![](https://avatars.githubusercontent.com/u/2256422?v=4)](https://github.com/leo-gan)[@leo-gan](https://github.com/leo-gan) [![](https://avatars.githubusercontent.com/u/11026406?v=4)](https://github.com/lkuligin)[@lkuligin](https://github.com/lkuligin) [![](https://avatars.githubusercontent.com/u/11633333?u=e13817e11b3fb8c3d209d747c77a0f0742d11138&v=4)](https://github.com/cbornet)[@cbornet](https://github.com/cbornet) [![](https://avatars.githubusercontent.com/u/289369?u=80655eb5f9a4d03bf1a526b07a67adc6eacccc6b&v=4)](https://github.com/3coins)[@3coins](https://github.com/3coins) [![](https://avatars.githubusercontent.com/u/48236177?u=757490c6af76be0a8837dd5886991005a23c89c7&v=4)](https://github.com/liugddx)[@liugddx](https://github.com/liugddx) [![](https://avatars.githubusercontent.com/u/49480?u=4a9b7c8820211aae14da7f72f617d88019a06569&v=4)](https://github.com/joemcelroy)[@joemcelroy](https://github.com/joemcelroy) [![](https://avatars.githubusercontent.com/u/67427?v=4)](https://github.com/jexp)[@jexp](https://github.com/jexp) [![](https://avatars.githubusercontent.com/u/44113430?u=34bdaacaeb2880e40fb4b07897c481771c6de544&v=4)](https://github.com/mspronesti)[@mspronesti](https://github.com/mspronesti) [![](https://avatars.githubusercontent.com/u/8429627?u=d28653fbd93c966ac840f93a05f0ef949495851f&v=4)](https://github.com/JohnNay)[@JohnNay](https://github.com/JohnNay) [![](https://avatars.githubusercontent.com/u/749277?u=84aeb7b75146a67f8b18b389dc591ba72ef105e4&v=4)](https://github.com/tjaffri)[@tjaffri](https://github.com/tjaffri) [![](https://avatars.githubusercontent.com/u/72488598?u=98dc24a63369cbae14913caff5f379f80f305aab&v=4)](https://github.com/Undertone0809)[@Undertone0809](https://github.com/Undertone0809) [![](https://avatars.githubusercontent.com/u/6690839?u=e56c2161ddc98c58b01fb82da4076e5400fb1e6d&v=4)](https://github.com/sjwhitmore)[@sjwhitmore](https://github.com/sjwhitmore) [![](https://avatars.githubusercontent.com/u/13262395?u=430eff10dfbb7d3f27a35f1ea2c9ea6a61067c88&v=4)](https://github.com/holtskinner)[@holtskinner](https://github.com/holtskinner) [![](https://avatars.githubusercontent.com/u/19948365?v=4)](https://github.com/tomasonjo)[@tomasonjo](https://github.com/tomasonjo) [![](https://avatars.githubusercontent.com/u/62768671?u=279f772a5b8325a191a1a8bb623aa40f32a01856&v=4)](https://github.com/skcoirz)[@skcoirz](https://github.com/skcoirz) [![](https://avatars.githubusercontent.com/u/20304844?u=f00461bcedad6ba384a4e234a44c906802448b4e&v=4)](https://github.com/tylerhutcherson)[@tylerhutcherson](https://github.com/tylerhutcherson) [![](https://avatars.githubusercontent.com/u/13009163?u=c2b3a11cceaadbc9415f545b971250c9e2b2078b&v=4)](https://github.com/Spartee)[@Spartee](https://github.com/Spartee) [![](https://avatars.githubusercontent.com/u/19181718?u=79a9013dea28a7fa654431cd7e89b08dc76434dd&v=4)](https://github.com/sepiatone)[@sepiatone](https://github.com/sepiatone) 
[![](https://avatars.githubusercontent.com/u/123224380?v=4)](https://github.com/scadEfUr)[@scadEfUr](https://github.com/scadEfUr) [![](https://avatars.githubusercontent.com/u/1635179?u=0631cb84ca580089198114f94d9c27efe730220e&v=4)](https://github.com/MthwRobinson)[@MthwRobinson](https://github.com/MthwRobinson) [![](https://avatars.githubusercontent.com/u/891664?u=722172a0061f68ab22819fa88a354ec973f70a63&v=4)](https://github.com/jeffchuber)[@jeffchuber](https://github.com/jeffchuber) [![](https://avatars.githubusercontent.com/u/2649301?u=5e688d2b90ddcafd5028a9da292010144cad6d18&v=4)](https://github.com/kacperlukawski)[@kacperlukawski](https://github.com/kacperlukawski) [![](https://avatars.githubusercontent.com/u/25930426?v=4)](https://github.com/pranjaldoshi96)[@pranjaldoshi96](https://github.com/pranjaldoshi96) [![](https://avatars.githubusercontent.com/u/46051506?u=026f5f140e8b7ba4744bf971f9ebdea9ebab67ca&v=4)](https://github.com/Anush008)[@Anush008](https://github.com/Anush008) [![](https://avatars.githubusercontent.com/u/23314389?u=2014e20e246530fa89bd902fe703b6f9e6ecf833&v=4)](https://github.com/nicoloboschi)[@nicoloboschi](https://github.com/nicoloboschi) [![](https://avatars.githubusercontent.com/u/5015933?u=80e339672a321cde25f4b484129bbddfefb2356d&v=4)](https://github.com/ShaneHarvey)[@ShaneHarvey](https://github.com/ShaneHarvey) [![](https://avatars.githubusercontent.com/u/13749212?u=b58700c3bd236e880223bccba53b7ad0dd4d7003&v=4)](https://github.com/eavanvalkenburg)[@eavanvalkenburg](https://github.com/eavanvalkenburg) [![](https://avatars.githubusercontent.com/u/1097932?u=0e9c1cc9e2c02469e52963322344af181464bf43&v=4)](https://github.com/gengliangwang)[@gengliangwang](https://github.com/gengliangwang) [![](https://avatars.githubusercontent.com/u/39497902?u=0c1597698c6f28da87d80ac0de9c8276d5ab63e9&v=4)](https://github.com/dbczumar)[@dbczumar](https://github.com/dbczumar) [![](https://avatars.githubusercontent.com/u/17039389?u=796226152becf82c4d7fd5cc49a24e58a73ce66f&v=4)](https://github.com/harupy)[@harupy](https://github.com/harupy) [![](https://avatars.githubusercontent.com/u/251292?u=a7465aae734d2cbc12d26b885b07d466d969bf0c&v=4)](https://github.com/jmorganca)[@jmorganca](https://github.com/jmorganca) [![](https://avatars.githubusercontent.com/u/2096628?u=2a4822ff8dc6b4f1162c58716d48fdfac08c8601&v=4)](https://github.com/blink1073)[@blink1073](https://github.com/blink1073) [![](https://avatars.githubusercontent.com/u/14221764?u=47a1405343b4d92caed3744e82dda1d28d01a251&v=4)](https://github.com/hemidactylus)[@hemidactylus](https://github.com/hemidactylus) [![](https://avatars.githubusercontent.com/u/101075607?v=4)](https://github.com/andersenchen)[@andersenchen](https://github.com/andersenchen) [![](https://avatars.githubusercontent.com/u/43734688?u=78f139fa940620e301361a58821c9f56128f71d9&v=4)](https://github.com/sam-h-bean)[@sam-h-bean](https://github.com/sam-h-bean) [![](https://avatars.githubusercontent.com/u/20311743?u=29bf2391ae34297a12a88d813731b0bdf289e4a5&v=4)](https://github.com/nickscamara)[@nickscamara](https://github.com/nickscamara) [![](https://avatars.githubusercontent.com/u/89161683?u=4a59b199c77215fe3cb8c937797b909061ec49af&v=4)](https://github.com/naveentatikonda)[@naveentatikonda](https://github.com/naveentatikonda) [![](https://avatars.githubusercontent.com/u/24217337?u=09d0e274f382e264ef578e93b547fb55a5b179fe&v=4)](https://github.com/kylehh)[@kylehh](https://github.com/kylehh) 
[![](https://avatars.githubusercontent.com/u/6162415?u=82e86c06ae37add3750f9db9ad9d7dfa250ddae7&v=4)](https://github.com/navneet1v)[@navneet1v](https://github.com/navneet1v) [![](https://avatars.githubusercontent.com/u/851520?u=21c6d8ef697fd32a8020d81269e155a24cb081ac&v=4)](https://github.com/maxjakob)[@maxjakob](https://github.com/maxjakob) [![](https://avatars.githubusercontent.com/u/2887713?u=7bb198c7d11d29a412dc836818f3da6666f643ee&v=4)](https://github.com/Jibola)[@Jibola](https://github.com/Jibola) Top recent contributors[​](#top-recent-contributors "Direct link to Top recent contributors") --------------------------------------------------------------------------------------------- The list below contains contributors who have had the most PRs merged in the last three months, weighted (imperfectly) by impact. Thank you all so much for your time and efforts in making LangChain better ❤️! [![](https://avatars.githubusercontent.com/u/2256422?v=4)](https://github.com/leo-gan)[@leo-gan](https://github.com/leo-gan) [![](https://avatars.githubusercontent.com/u/11633333?u=e13817e11b3fb8c3d209d747c77a0f0742d11138&v=4)](https://github.com/cbornet)[@cbornet](https://github.com/cbornet) [![](https://avatars.githubusercontent.com/u/19948365?v=4)](https://github.com/tomasonjo)[@tomasonjo](https://github.com/tomasonjo) [![](https://avatars.githubusercontent.com/u/19181718?u=79a9013dea28a7fa654431cd7e89b08dc76434dd&v=4)](https://github.com/sepiatone)[@sepiatone](https://github.com/sepiatone) [![](https://avatars.githubusercontent.com/u/48236177?u=757490c6af76be0a8837dd5886991005a23c89c7&v=4)](https://github.com/liugddx)[@liugddx](https://github.com/liugddx) [![](https://avatars.githubusercontent.com/u/851520?u=21c6d8ef697fd32a8020d81269e155a24cb081ac&v=4)](https://github.com/maxjakob)[@maxjakob](https://github.com/maxjakob) [![](https://avatars.githubusercontent.com/u/11026406?v=4)](https://github.com/lkuligin)[@lkuligin](https://github.com/lkuligin) [![](https://avatars.githubusercontent.com/u/127103098?v=4)](https://github.com/harry-cohere)[@harry-cohere](https://github.com/harry-cohere) [![](https://avatars.githubusercontent.com/u/91237924?u=76e7131a2ebbe9ef35061620286d6d06258e7a61&v=4)](https://github.com/OpenVINO-dev-contest)[@OpenVINO-dev-contest](https://github.com/OpenVINO-dev-contest) [![](https://avatars.githubusercontent.com/u/14959173?u=87fcb0013440f648fb263168583695258b6dbf1c&v=4)](https://github.com/jhpiedrahitao)[@jhpiedrahitao](https://github.com/jhpiedrahitao) [![](https://avatars.githubusercontent.com/u/2887713?u=7bb198c7d11d29a412dc836818f3da6666f643ee&v=4)](https://github.com/Jibola)[@Jibola](https://github.com/Jibola) [![](https://avatars.githubusercontent.com/u/31382824?u=9ce2d58c7c1c9f9a225f1929633b77c24d607d5b&v=4)](https://github.com/Adi8885)[@Adi8885](https://github.com/Adi8885) [![](https://avatars.githubusercontent.com/u/144115527?u=b881a61482b25b543dacd217d18fc5b98c38e7a3&v=4)](https://github.com/billytrend-cohere)[@billytrend-cohere](https://github.com/billytrend-cohere) [![](https://avatars.githubusercontent.com/u/139469471?v=4)](https://github.com/MateuszOssGit)[@MateuszOssGit](https://github.com/MateuszOssGit) [![](https://avatars.githubusercontent.com/u/39553475?u=919fcd626077055164ce97bf6cde0a47c54507de&v=4)](https://github.com/Josephasafg)[@Josephasafg](https://github.com/Josephasafg) [![](https://avatars.githubusercontent.com/u/9318457?u=3dbf765a07fee48e3dd171851b8417c002a41f49&v=4)](https://github.com/rahul-trip)[@rahul-trip](https://github.com/rahul-trip) 
[![](https://avatars.githubusercontent.com/u/35945268?u=4379ecd5062eea0f6449c520ddde5fe1e3724500&v=4)](https://github.com/junkeon)[@junkeon](https://github.com/junkeon) [![](https://avatars.githubusercontent.com/u/63123596?u=ae18d496d5a6ced90d57c147f102f7c5ecf8e63f&v=4)](https://github.com/maximeperrindev)[@maximeperrindev](https://github.com/maximeperrindev) [![](https://avatars.githubusercontent.com/u/45242107?u=bf122f1371d59c3ba69a87225255fbd00e894404&v=4)](https://github.com/keenborder786)[@keenborder786](https://github.com/keenborder786) [![](https://avatars.githubusercontent.com/u/22965499?u=883e3e34158ff6beadadef0178f83d1200be1acf&v=4)](https://github.com/sfvaroglu)[@sfvaroglu](https://github.com/sfvaroglu) Core maintainers[​](#core-maintainers "Direct link to Core maintainers") ------------------------------------------------------------------------ Hello there 👋! We're LangChain's core maintainers. If you've spent time in the community, you've probably crossed paths with at least one of us already. [![](https://avatars.githubusercontent.com/u/9557659?u=44391f1f5f5e3a72acc9772ca30f28bfdcc25fac&v=4)](https://github.com/efriis)[@efriis](https://github.com/efriis) [![](https://avatars.githubusercontent.com/u/26529506?u=528b1df1ba3ba4f21e3e1fb74b12766e5b04c487&v=4)](https://github.com/ccurme)[@ccurme](https://github.com/ccurme) [![](https://avatars.githubusercontent.com/u/9536492?u=820809d60f4a720a4e1f507a1bf866dfb5f86614&v=4)](https://github.com/agola11)[@agola11](https://github.com/agola11) [![](https://avatars.githubusercontent.com/u/22008038?u=8e3d6bbd0adbe02f0bd259c44f2ddb8612f90d88&v=4)](https://github.com/baskaryan)[@baskaryan](https://github.com/baskaryan) [![](https://avatars.githubusercontent.com/u/19161700?u=e76bcd472b51c9f07befd2654783d0a381f49005&v=4)](https://github.com/vbarda)[@vbarda](https://github.com/vbarda) [![](https://avatars.githubusercontent.com/u/3205522?v=4)](https://github.com/eyurtsev)[@eyurtsev](https://github.com/eyurtsev) [![](https://avatars.githubusercontent.com/u/56902?u=fdb30e802c68bc338dd9c0820f713e4fdac75db7&v=4)](https://github.com/nfcampos)[@nfcampos](https://github.com/nfcampos) [![](https://avatars.githubusercontent.com/u/122662504?u=e88c472fba16a74332c550cc9707fd015738a0da&v=4)](https://github.com/rlancemartin)[@rlancemartin](https://github.com/rlancemartin) [![](https://avatars.githubusercontent.com/u/13333726?u=82ebf1e0eb0663ebd49ba66f67a43f51bbf11442&v=4)](https://github.com/hinthornw)[@hinthornw](https://github.com/hinthornw) [![](https://avatars.githubusercontent.com/u/11986836?u=f4c4f21a82b2af6c9f91e1f1d99ea40062f7a101&v=4)](https://github.com/hwchase17)[@hwchase17](https://github.com/hwchase17) Top all-time contributors[​](#top-all-time-contributors "Direct link to Top all-time contributors") --------------------------------------------------------------------------------------------------- And finally, this is an all-time list of all-stars who have made significant contributions to the framework 🌟: [![](https://avatars.githubusercontent.com/u/2256422?v=4)](https://github.com/leo-gan)[@leo-gan](https://github.com/leo-gan) [![](https://avatars.githubusercontent.com/u/11633333?u=e13817e11b3fb8c3d209d747c77a0f0742d11138&v=4)](https://github.com/cbornet)[@cbornet](https://github.com/cbornet) [![](https://avatars.githubusercontent.com/u/19948365?v=4)](https://github.com/tomasonjo)[@tomasonjo](https://github.com/tomasonjo) 
[![](https://avatars.githubusercontent.com/u/11026406?v=4)](https://github.com/lkuligin)[@lkuligin](https://github.com/lkuligin) [![](https://avatars.githubusercontent.com/u/1635179?u=0631cb84ca580089198114f94d9c27efe730220e&v=4)](https://github.com/MthwRobinson)[@MthwRobinson](https://github.com/MthwRobinson) [![](https://avatars.githubusercontent.com/u/2649301?u=5e688d2b90ddcafd5028a9da292010144cad6d18&v=4)](https://github.com/kacperlukawski)[@kacperlukawski](https://github.com/kacperlukawski) [![](https://avatars.githubusercontent.com/u/14221764?u=47a1405343b4d92caed3744e82dda1d28d01a251&v=4)](https://github.com/hemidactylus)[@hemidactylus](https://github.com/hemidactylus) [![](https://avatars.githubusercontent.com/u/707699?u=5af157e56c17bb694ed78f27ba313dcb576f00bd&v=4)](https://github.com/timothyasp)[@timothyasp](https://github.com/timothyasp) [![](https://avatars.githubusercontent.com/u/6690839?u=e56c2161ddc98c58b01fb82da4076e5400fb1e6d&v=4)](https://github.com/sjwhitmore)[@sjwhitmore](https://github.com/sjwhitmore) [![](https://avatars.githubusercontent.com/u/289369?u=80655eb5f9a4d03bf1a526b07a67adc6eacccc6b&v=4)](https://github.com/3coins)[@3coins](https://github.com/3coins) [![](https://avatars.githubusercontent.com/u/6439365?u=51c4e9ea28b36473f21524fb68f7b717047e36f9&v=4)](https://github.com/mbchang)[@mbchang](https://github.com/mbchang) [![](https://avatars.githubusercontent.com/u/131175?u=332fe36f12d9ffe9e4414dc776b381fe801a9c53&v=4)](https://github.com/danielchalef)[@danielchalef](https://github.com/danielchalef) [![](https://avatars.githubusercontent.com/u/48236177?u=757490c6af76be0a8837dd5886991005a23c89c7&v=4)](https://github.com/liugddx)[@liugddx](https://github.com/liugddx) [![](https://avatars.githubusercontent.com/u/44113430?u=34bdaacaeb2880e40fb4b07897c481771c6de544&v=4)](https://github.com/mspronesti)[@mspronesti](https://github.com/mspronesti) [![](https://avatars.githubusercontent.com/u/15604894?u=420ab32f71fa4a6839da653b5a5d97381b087902&v=4)](https://github.com/chyroc)[@chyroc](https://github.com/chyroc) [![](https://avatars.githubusercontent.com/u/13749212?u=b58700c3bd236e880223bccba53b7ad0dd4d7003&v=4)](https://github.com/eavanvalkenburg)[@eavanvalkenburg](https://github.com/eavanvalkenburg) [![](https://avatars.githubusercontent.com/u/23517545?u=06757717778f7c2a0a092b78edfc242d356a2b3f&v=4)](https://github.com/shibuiwilliam)[@shibuiwilliam](https://github.com/shibuiwilliam) [![](https://avatars.githubusercontent.com/u/13262395?u=430eff10dfbb7d3f27a35f1ea2c9ea6a61067c88&v=4)](https://github.com/holtskinner)[@holtskinner](https://github.com/holtskinner) [![](https://avatars.githubusercontent.com/u/19181718?u=79a9013dea28a7fa654431cd7e89b08dc76434dd&v=4)](https://github.com/sepiatone)[@sepiatone](https://github.com/sepiatone) [![](https://avatars.githubusercontent.com/u/139469471?v=4)](https://github.com/MateuszOssGit)[@MateuszOssGit](https://github.com/MateuszOssGit) [![](https://avatars.githubusercontent.com/u/24279597?u=05e329b5fa4f95223f9fbb1daa07118f72e4a071&v=4)](https://github.com/fpingham)[@fpingham](https://github.com/fpingham) [![](https://avatars.githubusercontent.com/u/10000925?u=7970fa7b01d133adfe533c4311b7963e22dc6766&v=4)](https://github.com/169)[@169](https://github.com/169) [![](https://avatars.githubusercontent.com/u/749277?u=84aeb7b75146a67f8b18b389dc591ba72ef105e4&v=4)](https://github.com/tjaffri)[@tjaffri](https://github.com/tjaffri) 
[![](https://avatars.githubusercontent.com/u/144115527?u=b881a61482b25b543dacd217d18fc5b98c38e7a3&v=4)](https://github.com/billytrend-cohere)[@billytrend-cohere](https://github.com/billytrend-cohere) [![](https://avatars.githubusercontent.com/u/20311743?u=29bf2391ae34297a12a88d813731b0bdf289e4a5&v=4)](https://github.com/nickscamara)[@nickscamara](https://github.com/nickscamara) [![](https://avatars.githubusercontent.com/u/851520?u=21c6d8ef697fd32a8020d81269e155a24cb081ac&v=4)](https://github.com/maxjakob)[@maxjakob](https://github.com/maxjakob) [![](https://avatars.githubusercontent.com/u/45242107?u=bf122f1371d59c3ba69a87225255fbd00e894404&v=4)](https://github.com/keenborder786)[@keenborder786](https://github.com/keenborder786) [![](https://avatars.githubusercontent.com/u/142261444?u=23524d34d4d0dfce963a24131a3c28e89daa9fc7&v=4)](https://github.com/maks-operlejn-ds)[@maks-operlejn-ds](https://github.com/maks-operlejn-ds) [![](https://avatars.githubusercontent.com/u/1823547?u=ea9246b84dbc3886d96ba171aabb64d2470c8d60&v=4)](https://github.com/ofermend)[@ofermend](https://github.com/ofermend) [![](https://avatars.githubusercontent.com/u/64213648?u=a9a3c39e0277dcb74d102e73511df929d2a1ecc6&v=4)](https://github.com/sergerdn)[@sergerdn](https://github.com/sergerdn) [![](https://avatars.githubusercontent.com/u/57520563?v=4)](https://github.com/volodymyr-memsql)[@volodymyr-memsql](https://github.com/volodymyr-memsql) [![](https://avatars.githubusercontent.com/u/2887713?u=7bb198c7d11d29a412dc836818f3da6666f643ee&v=4)](https://github.com/Jibola)[@Jibola](https://github.com/Jibola) [![](https://avatars.githubusercontent.com/u/6519888?u=fe0b0f093e8683bdac4f205b237d2e48d7c755d4&v=4)](https://github.com/averikitsch)[@averikitsch](https://github.com/averikitsch) [![](https://avatars.githubusercontent.com/u/89161683?u=4a59b199c77215fe3cb8c937797b909061ec49af&v=4)](https://github.com/naveentatikonda)[@naveentatikonda](https://github.com/naveentatikonda) [![](https://avatars.githubusercontent.com/u/56769451?u=088102b6160822bc68c25a2a5df170080d0b16a2&v=4)](https://github.com/tyumentsev4)[@tyumentsev4](https://github.com/tyumentsev4) [![](https://avatars.githubusercontent.com/u/40663591?u=d0a44575938f379eb414c15d9bdc0ecf6911f1b8&v=4)](https://github.com/UmerHA)[@UmerHA](https://github.com/UmerHA) [![](https://avatars.githubusercontent.com/u/84336755?u=35224f42916080bd7add99571a3132f5ef8217b8&v=4)](https://github.com/joshuasundance-swca)[@joshuasundance-swca](https://github.com/joshuasundance-swca) [![](https://avatars.githubusercontent.com/u/54854336?v=4)](https://github.com/adolkhan)[@adolkhan](https://github.com/adolkhan) [![](https://avatars.githubusercontent.com/u/22579106?v=4)](https://github.com/seamusp)[@seamusp](https://github.com/seamusp) [![](https://avatars.githubusercontent.com/u/63565275?u=08a65e589a3045dad9c13218858c8a91d16528fc&v=4)](https://github.com/michaelfeil)[@michaelfeil](https://github.com/michaelfeil) [![](https://avatars.githubusercontent.com/u/31382824?u=9ce2d58c7c1c9f9a225f1929633b77c24d607d5b&v=4)](https://github.com/Adi8885)[@Adi8885](https://github.com/Adi8885) [![](https://avatars.githubusercontent.com/u/210457?u=3f6ac4dcc1ec9f1b98cc62fd7095120da2accbc4&v=4)](https://github.com/blob42)[@blob42](https://github.com/blob42) [![](https://avatars.githubusercontent.com/u/3690240?v=4)](https://github.com/malandis)[@malandis](https://github.com/malandis) 
[![](https://avatars.githubusercontent.com/u/8456706?u=bc28d399a4ef7495eaa1e8a8a7b99dda98217260&v=4)](https://github.com/mpskex)[@mpskex](https://github.com/mpskex) [![](https://avatars.githubusercontent.com/u/7069390?u=c10e9b05119b96e82f03a807a2392f938a59f4ef&v=4)](https://github.com/davidbuniat)[@davidbuniat](https://github.com/davidbuniat) [![](https://avatars.githubusercontent.com/u/5787923?u=368596daa7442493d6c26725eb7d0ac5678c7e73&v=4)](https://github.com/ShreyaR)[@ShreyaR](https://github.com/ShreyaR) [![](https://avatars.githubusercontent.com/u/1825679?u=bc5db0325ef2a546c67e1e2ae1f7a0af7afe6803&v=4)](https://github.com/maiqingqiang)[@maiqingqiang](https://github.com/maiqingqiang) [![](https://avatars.githubusercontent.com/u/20304844?u=f00461bcedad6ba384a4e234a44c906802448b4e&v=4)](https://github.com/tylerhutcherson)[@tylerhutcherson](https://github.com/tylerhutcherson) [![](https://avatars.githubusercontent.com/u/62768671?u=279f772a5b8325a191a1a8bb623aa40f32a01856&v=4)](https://github.com/skcoirz)[@skcoirz](https://github.com/skcoirz) [![](https://avatars.githubusercontent.com/u/91237924?u=76e7131a2ebbe9ef35061620286d6d06258e7a61&v=4)](https://github.com/OpenVINO-dev-contest)[@OpenVINO-dev-contest](https://github.com/OpenVINO-dev-contest) [![](https://avatars.githubusercontent.com/u/8990777?u=9f7c4ab36aa10d7594748fdc9ddba6ff3f0a2f77&v=4)](https://github.com/jamesbraza)[@jamesbraza](https://github.com/jamesbraza) [![](https://avatars.githubusercontent.com/u/66525873?u=71102c35b5c8d325d34c32a4f9a07b6f97d90836&v=4)](https://github.com/manuel-soria)[@manuel-soria](https://github.com/manuel-soria) [![](https://avatars.githubusercontent.com/u/94075036?u=b636b7e4d6abff66af96ccae00d539db4735eea1&v=4)](https://github.com/CG80499)[@CG80499](https://github.com/CG80499) [![](https://avatars.githubusercontent.com/u/60956360?u=5678f015273d23e2cbdacbe172bcf154de0f4f86&v=4)](https://github.com/outday29)[@outday29](https://github.com/outday29) [![](https://avatars.githubusercontent.com/u/127103098?v=4)](https://github.com/harry-cohere)[@harry-cohere](https://github.com/harry-cohere) [![](https://avatars.githubusercontent.com/u/1821407?u=0a24b0db8c1a9231ce1c347de92f57341defada2&v=4)](https://github.com/GMartin-dev)[@GMartin-dev](https://github.com/GMartin-dev) [![](https://avatars.githubusercontent.com/u/15918167?v=4)](https://github.com/ljeagle)[@ljeagle](https://github.com/ljeagle) [![](https://avatars.githubusercontent.com/u/46051506?u=026f5f140e8b7ba4744bf971f9ebdea9ebab67ca&v=4)](https://github.com/Anush008)[@Anush008](https://github.com/Anush008) [![](https://avatars.githubusercontent.com/u/49480?u=4a9b7c8820211aae14da7f72f617d88019a06569&v=4)](https://github.com/joemcelroy)[@joemcelroy](https://github.com/joemcelroy) [![](https://avatars.githubusercontent.com/u/13748374?u=47b1f523342466ab97dd23e285418c5f5c9820c4&v=4)](https://github.com/wangxuqi)[@wangxuqi](https://github.com/wangxuqi) [![](https://avatars.githubusercontent.com/u/901795?u=c8cd7391f649623258b5f5ea848550df9407107b&v=4)](https://github.com/virattt)[@virattt](https://github.com/virattt) [![](https://avatars.githubusercontent.com/u/1097932?u=0e9c1cc9e2c02469e52963322344af181464bf43&v=4)](https://github.com/gengliangwang)[@gengliangwang](https://github.com/gengliangwang) [![](https://avatars.githubusercontent.com/u/20971593?u=1574196bb286044d23a04aa5aa34203ada8f4309&v=4)](https://github.com/jzluo)[@jzluo](https://github.com/jzluo) 
[![](https://avatars.githubusercontent.com/u/39553475?u=919fcd626077055164ce97bf6cde0a47c54507de&v=4)](https://github.com/Josephasafg)[@Josephasafg](https://github.com/Josephasafg) [![](https://avatars.githubusercontent.com/u/10701973?u=866bdbf25a3759626815099ce480e2ffcff520fb&v=4)](https://github.com/IANTHEREAL)[@IANTHEREAL](https://github.com/IANTHEREAL) [![](https://avatars.githubusercontent.com/u/142883372?u=45481f472f5f89c4d8ca8788617ffac47c5ebd88&v=4)](https://github.com/mateusz-wosinski-ds)[@mateusz-wosinski-ds](https://github.com/mateusz-wosinski-ds) [![](https://avatars.githubusercontent.com/u/5013466?u=f46f9262437c7f899394561c2f2dcb7e4b669868&v=4)](https://github.com/Jped)[@Jped](https://github.com/Jped) [![](https://avatars.githubusercontent.com/u/24587702?u=bc1fe15724c747b755a5b3812e802d7cbdd134c2&v=4)](https://github.com/hughcrt)[@hughcrt](https://github.com/hughcrt) [![](https://avatars.githubusercontent.com/u/62176855?v=4)](https://github.com/cs0lar)[@cs0lar](https://github.com/cs0lar) [![](https://avatars.githubusercontent.com/u/141953346?u=ede12989daf498a2df632344378a57e4f2b4c317&v=4)](https://github.com/ShorthillsAI)[@ShorthillsAI](https://github.com/ShorthillsAI) [![](https://avatars.githubusercontent.com/u/17039389?u=796226152becf82c4d7fd5cc49a24e58a73ce66f&v=4)](https://github.com/harupy)[@harupy](https://github.com/harupy) [![](https://avatars.githubusercontent.com/u/1296705?v=4)](https://github.com/lalanikarim)[@lalanikarim](https://github.com/lalanikarim) [![](https://avatars.githubusercontent.com/u/14959173?u=87fcb0013440f648fb263168583695258b6dbf1c&v=4)](https://github.com/jhpiedrahitao)[@jhpiedrahitao](https://github.com/jhpiedrahitao) [![](https://avatars.githubusercontent.com/u/24217337?u=09d0e274f382e264ef578e93b547fb55a5b179fe&v=4)](https://github.com/kylehh)[@kylehh](https://github.com/kylehh) [![](https://avatars.githubusercontent.com/u/53237856?u=656560c61bb540c9930574037126d2280ef0b4f8&v=4)](https://github.com/jeffvestal)[@jeffvestal](https://github.com/jeffvestal) [![](https://avatars.githubusercontent.com/u/32310964?u=56cd9386d632a330b8ecb180d7271b3d043c93a3&v=4)](https://github.com/VKudlay)[@VKudlay](https://github.com/VKudlay) [![](https://avatars.githubusercontent.com/u/25208228?u=a89453c38529259ef0ac9c6fd2a695311a680386&v=4)](https://github.com/conceptofmind)[@conceptofmind](https://github.com/conceptofmind) [![](https://avatars.githubusercontent.com/u/22171838?u=a7c4ea3fcebeafc5e9857727974bf2a3362dafe4&v=4)](https://github.com/ruoccofabrizio)[@ruoccofabrizio](https://github.com/ruoccofabrizio) [![](https://avatars.githubusercontent.com/u/49201354?u=adef4744d1abcd52f751d21a30fbe52abddf9b94&v=4)](https://github.com/axiangcoding)[@axiangcoding](https://github.com/axiangcoding) [![](https://avatars.githubusercontent.com/u/2464556?u=4d6150c38daf305b43153112d1f2815d287273ea&v=4)](https://github.com/homanp)[@homanp](https://github.com/homanp) [![](https://avatars.githubusercontent.com/u/10434946?u=6e20682a9c48909576b6ecc2fc93da3dbb90a52a&v=4)](https://github.com/yakigac)[@yakigac](https://github.com/yakigac) [![](https://avatars.githubusercontent.com/u/204694?u=c42de41cff108d35269dd2e8fac8977f1f4e471d&v=4)](https://github.com/pprados)[@pprados](https://github.com/pprados) [![](https://avatars.githubusercontent.com/u/17022025?u=ceee62d53f1c06bf9a014096b651ca0c42cfea3b&v=4)](https://github.com/zc277584121)[@zc277584121](https://github.com/zc277584121) 
[![](https://avatars.githubusercontent.com/u/5001050?u=d5d0c24dc9566cec4b8e3cd376150c05b42c5210&v=4)](https://github.com/HunterGerlach)[@HunterGerlach](https://github.com/HunterGerlach) [![](https://avatars.githubusercontent.com/u/22633385?u=29190f6c8aed91fa9574b064a9995f1e49944acf&v=4)](https://github.com/eltociear)[@eltociear](https://github.com/eltociear) [![](https://avatars.githubusercontent.com/u/753206?u=911ac7819a0dcf86bd5fd8ad8e4f986e22b8579b&v=4)](https://github.com/gkorland)[@gkorland](https://github.com/gkorland) [![](https://avatars.githubusercontent.com/u/77560236?u=54a3bf63360d61f6571015dd46fa1d03460fbbc9&v=4)](https://github.com/Gordon-BP)[@Gordon-BP](https://github.com/Gordon-BP) [![](https://avatars.githubusercontent.com/u/18380243?u=746579a015b76842c0994cf04c623e683444fc90&v=4)](https://github.com/kzk-maeda)[@kzk-maeda](https://github.com/kzk-maeda) [![](https://avatars.githubusercontent.com/u/8893086?u=220ec6df446248eeb09a59230c017a2c57bf8e61&v=4)](https://github.com/saginawj)[@saginawj](https://github.com/saginawj) [![](https://avatars.githubusercontent.com/u/81822489?u=07badfd993685a278b1f929c1500a58837a6621d&v=4)](https://github.com/filip-halt)[@filip-halt](https://github.com/filip-halt) [![](https://avatars.githubusercontent.com/u/730013?v=4)](https://github.com/skozlovf)[@skozlovf](https://github.com/skozlovf) [![](https://avatars.githubusercontent.com/u/40636930?u=b1f3735dccd19433cc3aad1b673553bf7eb94723&v=4)](https://github.com/zachschillaci27)[@zachschillaci27](https://github.com/zachschillaci27) [![](https://avatars.githubusercontent.com/u/3032459?u=590f1489107c91803bbe75de26cfeeeb77b25f8d&v=4)](https://github.com/nelly-hateva)[@nelly-hateva](https://github.com/nelly-hateva) [![](https://avatars.githubusercontent.com/u/9318457?u=3dbf765a07fee48e3dd171851b8417c002a41f49&v=4)](https://github.com/rahul-trip)[@rahul-trip](https://github.com/rahul-trip) [![](https://avatars.githubusercontent.com/u/38650638?u=2b526137f18a7c41934c8da0722f1fedb74c3422&v=4)](https://github.com/wemysschen)[@wemysschen](https://github.com/wemysschen) [![](https://avatars.githubusercontent.com/u/22759784?v=4)](https://github.com/zanderchase)[@zanderchase](https://github.com/zanderchase) [![](https://avatars.githubusercontent.com/u/5894042?u=e34704516e5f58e932ce098a38747a9be8d614a5&v=4)](https://github.com/danielhjz)[@danielhjz](https://github.com/danielhjz) [![](https://avatars.githubusercontent.com/u/39944763?u=3074327b189542c2b47bb385b2d81d1e8ccb38e1&v=4)](https://github.com/os1ma)[@os1ma](https://github.com/os1ma) [![](https://avatars.githubusercontent.com/u/112245?u=c129f9b2439b082cca4a7a322e558fca514bb87d&v=4)](https://github.com/cevian)[@cevian](https://github.com/cevian) [![](https://avatars.githubusercontent.com/u/1309177?u=6328c998d93a48eba87c6b039783b8a7644c62c3&v=4)](https://github.com/charliermarsh)[@charliermarsh](https://github.com/charliermarsh) [![](https://avatars.githubusercontent.com/u/63123596?u=ae18d496d5a6ced90d57c147f102f7c5ecf8e63f&v=4)](https://github.com/maximeperrindev)[@maximeperrindev](https://github.com/maximeperrindev) [![](https://avatars.githubusercontent.com/u/2212586?v=4)](https://github.com/mackong)[@mackong](https://github.com/mackong) [![](https://avatars.githubusercontent.com/u/3760?u=1dfde576ef286346afcc2a71eaf1fdb2857fb547&v=4)](https://github.com/bborn)[@bborn](https://github.com/bborn) 
[![](https://avatars.githubusercontent.com/u/35945268?u=4379ecd5062eea0f6449c520ddde5fe1e3724500&v=4)](https://github.com/junkeon)[@junkeon](https://github.com/junkeon) [![](https://avatars.githubusercontent.com/u/129657162?u=353d87b0e8d4c628536e2e40a34a7622dc3c18ab&v=4)](https://github.com/jj701)[@jj701](https://github.com/jj701) [![](https://avatars.githubusercontent.com/u/26039352?v=4)](https://github.com/cauwulixuan)[@cauwulixuan](https://github.com/cauwulixuan) [![](https://avatars.githubusercontent.com/u/6406557?v=4)](https://github.com/markcusack)[@markcusack](https://github.com/markcusack) [![](https://avatars.githubusercontent.com/u/347398?v=4)](https://github.com/delip)[@delip](https://github.com/delip) [![](https://avatars.githubusercontent.com/u/757060?u=0c7583422d4c2b5572616f9e542e110bf5dd15f7&v=4)](https://github.com/ichernev)[@ichernev](https://github.com/ichernev) [![](https://avatars.githubusercontent.com/u/1812592?v=4)](https://github.com/kennethchoe)[@kennethchoe](https://github.com/kennethchoe) [![](https://avatars.githubusercontent.com/u/70973560?u=1a40b7be391714894999b7412de2e281abad530e&v=4)](https://github.com/amiaxys)[@amiaxys](https://github.com/amiaxys) [![](https://avatars.githubusercontent.com/u/891664?u=722172a0061f68ab22819fa88a354ec973f70a63&v=4)](https://github.com/jeffchuber)[@jeffchuber](https://github.com/jeffchuber) [![](https://avatars.githubusercontent.com/u/1995599?v=4)](https://github.com/shane-huang)[@shane-huang](https://github.com/shane-huang) [![](https://avatars.githubusercontent.com/u/14149230?u=ca710ca2a64391470163ddef6b5ea7633ab26872&v=4)](https://github.com/cbh123)[@cbh123](https://github.com/cbh123) [![](https://avatars.githubusercontent.com/u/17517367?u=b745b5f2016fbf166a75ce6ec18853c2fe7bbf12&v=4)](https://github.com/sdelgadoc)[@sdelgadoc](https://github.com/sdelgadoc) [![](https://avatars.githubusercontent.com/u/5794505?u=f78511e1a6ab9ab879647fe0a4230fef964190b5&v=4)](https://github.com/MartinKolbAtWork)[@MartinKolbAtWork](https://github.com/MartinKolbAtWork) [![](https://avatars.githubusercontent.com/u/951187?u=e80c215810058f57145042d12360d463e3a53443&v=4)](https://github.com/jirimoravcik)[@jirimoravcik](https://github.com/jirimoravcik) [![](https://avatars.githubusercontent.com/u/75213811?v=4)](https://github.com/kitrak-rev)[@kitrak-rev](https://github.com/kitrak-rev) [![](https://avatars.githubusercontent.com/u/1157440?u=2f81a28298c1172e732898a1f8e800342434801d&v=4)](https://github.com/tazarov)[@tazarov](https://github.com/tazarov) [![](https://avatars.githubusercontent.com/u/12809212?u=8c1f0baf8a29f3007e3a51f5cf7b4a8e04c5ca8d&v=4)](https://github.com/parambharat)[@parambharat](https://github.com/parambharat) [![](https://avatars.githubusercontent.com/u/139942740?u=fa99ca083ccdc7322c7b24f8a3c001e71be347b4&v=4)](https://github.com/baichuan-assistant)[@baichuan-assistant](https://github.com/baichuan-assistant) [![](https://avatars.githubusercontent.com/u/22965499?u=883e3e34158ff6beadadef0178f83d1200be1acf&v=4)](https://github.com/sfvaroglu)[@sfvaroglu](https://github.com/sfvaroglu) [![](https://avatars.githubusercontent.com/u/116604821?u=ec1518c27a7a15f33a138cf0b956ef1758edbaff&v=4)](https://github.com/sfc-gh-jcarroll)[@sfc-gh-jcarroll](https://github.com/sfc-gh-jcarroll) [![](https://avatars.githubusercontent.com/u/20006225?u=b5c543736384589fcb5b547f0d7700e545cb41ba&v=4)](https://github.com/jeffzwang)[@jeffzwang](https://github.com/jeffzwang) 
[![](https://avatars.githubusercontent.com/u/128378696?u=8c818bd39c9cd75b606f3b5b1479787e4e6845d9&v=4)](https://github.com/BeatrixCohere)[@BeatrixCohere](https://github.com/BeatrixCohere) [![](https://avatars.githubusercontent.com/u/38215315?u=3985b6a3ecb0e8338c5912ea9e20787152d0ad7a&v=4)](https://github.com/P-E-B)[@P-E-B](https://github.com/P-E-B) [![](https://avatars.githubusercontent.com/u/3045965?u=3d3c34259d50723955dd92d1de5be21236989356&v=4)](https://github.com/chadj2)[@chadj2](https://github.com/chadj2) [![](https://avatars.githubusercontent.com/u/43734688?u=78f139fa940620e301361a58821c9f56128f71d9&v=4)](https://github.com/sam-h-bean)[@sam-h-bean](https://github.com/sam-h-bean) [![](https://avatars.githubusercontent.com/u/60664495?u=ace0011a868848b48cdf9c199110dc8e5be5f433&v=4)](https://github.com/williamdevena)[@williamdevena](https://github.com/williamdevena) [![](https://avatars.githubusercontent.com/u/31483888?u=55359c6f832dfed3abf0e89ea9842ec88849341d&v=4)](https://github.com/filip-michalsky)[@filip-michalsky](https://github.com/filip-michalsky) [![](https://avatars.githubusercontent.com/u/3207674?v=4)](https://github.com/k8si)[@k8si](https://github.com/k8si) [![](https://avatars.githubusercontent.com/u/339166?v=4)](https://github.com/alexsherstinsky)[@alexsherstinsky](https://github.com/alexsherstinsky) [![](https://avatars.githubusercontent.com/u/7287580?u=5fe01002eec3d9df91ce3cef0016916554379efd&v=4)](https://github.com/edwardzjl)[@edwardzjl](https://github.com/edwardzjl) [![](https://avatars.githubusercontent.com/u/63742054?u=befe4ae74b906698be965bad482d0e02fc7707ab&v=4)](https://github.com/Nutlope)[@Nutlope](https://github.com/Nutlope) [![](https://avatars.githubusercontent.com/u/26054637?u=edd1e4f54e91b549f2edb525d43210f4f04d7367&v=4)](https://github.com/paul-paliychuk)[@paul-paliychuk](https://github.com/paul-paliychuk) [![](https://avatars.githubusercontent.com/u/4133076?u=f3f783e0364abe955dbde6af80445ea27d948fdd&v=4)](https://github.com/gregnr)[@gregnr](https://github.com/gregnr) [![](https://avatars.githubusercontent.com/u/70665700?u=d7c78b0f3e6c5b1f359d574cd03bdb75bf6bf2da&v=4)](https://github.com/asamant21)[@asamant21](https://github.com/asamant21) [![](https://avatars.githubusercontent.com/u/12044110?v=4)](https://github.com/sudranga)[@sudranga](https://github.com/sudranga) [![](https://avatars.githubusercontent.com/u/5168949?v=4)](https://github.com/sseide)[@sseide](https://github.com/sseide) [![](https://avatars.githubusercontent.com/u/216931?u=a8ca27d75e1765295ea9d23c191d8db834951066&v=4)](https://github.com/scottnath)[@scottnath](https://github.com/scottnath) [![](https://avatars.githubusercontent.com/u/125713079?u=d42f76da6ffe0be48277c5ebdec4684ff1b38415&v=4)](https://github.com/AI-Bassem)[@AI-Bassem](https://github.com/AI-Bassem) [![](https://avatars.githubusercontent.com/u/32453863?v=4)](https://github.com/BeautyyuYanli)[@BeautyyuYanli](https://github.com/BeautyyuYanli) [![](https://avatars.githubusercontent.com/u/167348611?v=4)](https://github.com/dglogo)[@dglogo](https://github.com/dglogo) [![](https://avatars.githubusercontent.com/u/1074525?v=4)](https://github.com/gradenr)[@gradenr](https://github.com/gradenr) [![](https://avatars.githubusercontent.com/u/24482442?u=d6095b9533599b26d16fe6273d8f513206976a62&v=4)](https://github.com/rohanaggarwal7997)[@rohanaggarwal7997](https://github.com/rohanaggarwal7997) 
[![](https://avatars.githubusercontent.com/u/4787922?u=dd4c7a18d86a6ad56455aa13e66daedbbbcf31b7&v=4)](https://github.com/zhaoshengbo)[@zhaoshengbo](https://github.com/zhaoshengbo) [![](https://avatars.githubusercontent.com/u/14350521?u=4d5e9bb44d41a1ff30f2efbb2959a21e33644e81&v=4)](https://github.com/hakantekgul)[@hakantekgul](https://github.com/hakantekgul) [![](https://avatars.githubusercontent.com/u/142571618?v=4)](https://github.com/eryk-dsai)[@eryk-dsai](https://github.com/eryk-dsai) [![](https://avatars.githubusercontent.com/u/3469711?u=6962798c0280caa0d0260ccb8be1b18fb3ea44b2&v=4)](https://github.com/mrtj)[@mrtj](https://github.com/mrtj) [![](https://avatars.githubusercontent.com/u/5069448?u=6b0ba426b68777f4935399013b7c2c112635c0df&v=4)](https://github.com/pcliupc)[@pcliupc](https://github.com/pcliupc) [![](https://avatars.githubusercontent.com/u/36760800?u=12735f9035294180cb0b83446bdf7d8ac1a3fef9&v=4)](https://github.com/alvarobartt)[@alvarobartt](https://github.com/alvarobartt) [![](https://avatars.githubusercontent.com/u/124558887?u=843f9f9de97097d85d0f685e0916d58196554421&v=4)](https://github.com/rogerserper)[@rogerserper](https://github.com/rogerserper) [![](https://avatars.githubusercontent.com/u/320302?u=657574cdbadd4bfb4c8ed65f8646d4983d7ca5f0&v=4)](https://github.com/ekzhu)[@ekzhu](https://github.com/ekzhu) [![](https://avatars.githubusercontent.com/u/139821907?u=f6f9648457adc2c15f407bb06d29089ae7e6f4cf&v=4)](https://github.com/ashleyxuu)[@ashleyxuu](https://github.com/ashleyxuu) [![](https://avatars.githubusercontent.com/u/4036753?u=c6732c896b41c1ecec917bfae38aa6900585c632&v=4)](https://github.com/bhalder)[@bhalder](https://github.com/bhalder) [![](https://avatars.githubusercontent.com/u/57731498?u=fec622b37ca3dc04125144116ad5165f37f85823&v=4)](https://github.com/mattgotteiner)[@mattgotteiner](https://github.com/mattgotteiner) [![](https://avatars.githubusercontent.com/u/17904229?u=3c9fa8237a9d29136d3bd1dd2a380ff6dddb5d94&v=4)](https://github.com/ZixinYang)[@ZixinYang](https://github.com/ZixinYang) [![](https://avatars.githubusercontent.com/u/48101485?u=dcf140777416a7d86a450964fc53ec5b17668603&v=4)](https://github.com/nikhilkjha)[@nikhilkjha](https://github.com/nikhilkjha) [![](https://avatars.githubusercontent.com/u/43818888?u=0c01fad081c0abd23d2d49ea4496890ffbc22325&v=4)](https://github.com/Dominastorm)[@Dominastorm](https://github.com/Dominastorm) [![](https://avatars.githubusercontent.com/u/13537446?v=4)](https://github.com/raunakshrivastava7)[@raunakshrivastava7](https://github.com/raunakshrivastava7) [![](https://avatars.githubusercontent.com/u/121117945?v=4)](https://github.com/rodrigo-f-nogueira)[@rodrigo-f-nogueira](https://github.com/rodrigo-f-nogueira) [![](https://avatars.githubusercontent.com/u/1585539?u=654a21985c875f78a20eda7e4884e8d64de86fba&v=4)](https://github.com/benjibc)[@benjibc](https://github.com/benjibc) [![](https://avatars.githubusercontent.com/u/53276514?u=d08fad4653e8d1b89382507a07f6990437730433&v=4)](https://github.com/hoyungcher)[@hoyungcher](https://github.com/hoyungcher) [![](https://avatars.githubusercontent.com/u/41710527?u=788f651d9933b36523feb431811a6531ecd994f1&v=4)](https://github.com/OwenPendrighElliott)[@OwenPendrighElliott](https://github.com/OwenPendrighElliott) [![](https://avatars.githubusercontent.com/u/67210837?u=7e6d3db8c71e8fdd631017b8c9f6b83248923007&v=4)](https://github.com/KyrianC)[@KyrianC](https://github.com/KyrianC) 
[![](https://avatars.githubusercontent.com/u/8142467?u=a62a20762c7fd841b470efc0ebdf5e1a01816f87&v=4)](https://github.com/Mikelarg)[@Mikelarg](https://github.com/Mikelarg) [![](https://avatars.githubusercontent.com/u/8862797?u=1856f20a3ac7425e75df7860bfd8934278fbdd53&v=4)](https://github.com/netoferraz)[@netoferraz](https://github.com/netoferraz) [![](https://avatars.githubusercontent.com/u/12782505?u=a3f1c6e7e68b96bb7be08ecd25f74f2396394597&v=4)](https://github.com/nithishr)[@nithishr](https://github.com/nithishr) [![](https://avatars.githubusercontent.com/u/3625100?u=b219abaae5763632a0edf8d79b46dca035f166a4&v=4)](https://github.com/zizhong)[@zizhong](https://github.com/zizhong) [![](https://avatars.githubusercontent.com/u/81076998?v=4)](https://github.com/amicus-veritatis)[@amicus-veritatis](https://github.com/amicus-veritatis) [![](https://avatars.githubusercontent.com/u/18572161?u=a09c7a053aa54cfc62ff8530c81486441215a09c&v=4)](https://github.com/MikeNitsenko)[@MikeNitsenko](https://github.com/MikeNitsenko) [![](https://avatars.githubusercontent.com/u/7851093?u=ab3c2c9c6ebd0cd1cd3ff2f83f8618ab9b2550ad&v=4)](https://github.com/liangz1)[@liangz1](https://github.com/liangz1) [![](https://avatars.githubusercontent.com/u/7953259?u=a451fad7ad197a8920651cf89aaf5d950734d0a8&v=4)](https://github.com/mikelambert)[@mikelambert](https://github.com/mikelambert) [![](https://avatars.githubusercontent.com/u/23314389?u=2014e20e246530fa89bd902fe703b6f9e6ecf833&v=4)](https://github.com/nicoloboschi)[@nicoloboschi](https://github.com/nicoloboschi) [![](https://avatars.githubusercontent.com/u/136885?u=9a42f56ad8055a03a5ae8a0272e66d1ae4ac083c&v=4)](https://github.com/mkorpela)[@mkorpela](https://github.com/mkorpela) [![](https://avatars.githubusercontent.com/u/31125281?u=1bc56191c789906c2a11a4183c108b2784609015&v=4)](https://github.com/linancn)[@linancn](https://github.com/linancn) [![](https://avatars.githubusercontent.com/u/101817?u=39f31ff29d2589046148c6ed1c1c923982d86b1a&v=4)](https://github.com/tsg)[@tsg](https://github.com/tsg) [![](https://avatars.githubusercontent.com/u/85610855?v=4)](https://github.com/am-kinetica)[@am-kinetica](https://github.com/am-kinetica) [![](https://avatars.githubusercontent.com/u/51159628?u=5aec3cf0263e77234dd83f8e6bf4955e39acd472&v=4)](https://github.com/anar2706)[@anar2706](https://github.com/anar2706) [![](https://avatars.githubusercontent.com/u/79988483?u=7b1cf8516362448115fc68870ad006a37a99d549&v=4)](https://github.com/yifeis7)[@yifeis7](https://github.com/yifeis7) [![](https://avatars.githubusercontent.com/u/908389?v=4)](https://github.com/whitead)[@whitead](https://github.com/whitead) [![](https://avatars.githubusercontent.com/u/89472452?u=47bcc0d72d51f2f914a759a0fde9ef3d1c677b98&v=4)](https://github.com/benitoThree)[@benitoThree](https://github.com/benitoThree) [![](https://avatars.githubusercontent.com/u/3300000?v=4)](https://github.com/ruze00)[@ruze00](https://github.com/ruze00) [![](https://avatars.githubusercontent.com/u/34462078?u=20243a60ac608142887c14251502c2a975614ba3&v=4)](https://github.com/raghavdixit99)[@raghavdixit99](https://github.com/raghavdixit99) [![](https://avatars.githubusercontent.com/u/53417823?v=4)](https://github.com/HeChangHaoGary)[@HeChangHaoGary](https://github.com/HeChangHaoGary) [![](https://avatars.githubusercontent.com/u/2851934?u=01c0d440fcb7fdb3159a7b641c58b5595028e9bc&v=4)](https://github.com/xiaoyuxee)[@xiaoyuxee](https://github.com/xiaoyuxee) 
[![](https://avatars.githubusercontent.com/u/15706966?u=f6dd024f1fc955b7d411eb13ebcae7334b527063&v=4)](https://github.com/jerwelborn)[@jerwelborn](https://github.com/jerwelborn) [![](https://avatars.githubusercontent.com/u/58508471?u=74423e863298863bf5c7dd7d1bff0aa106a9cc75&v=4)](https://github.com/Anindyadeep)[@Anindyadeep](https://github.com/Anindyadeep) [![](https://avatars.githubusercontent.com/u/65446134?u=a292659bc2611825b65a56a7ee6bfe6fdbfa033b&v=4)](https://github.com/vairodp)[@vairodp](https://github.com/vairodp) [![](https://avatars.githubusercontent.com/u/23406704?u=ac10555099789a8423dbc205ab4257b40aaf3860&v=4)](https://github.com/aletna)[@aletna](https://github.com/aletna) [![](https://avatars.githubusercontent.com/u/2398765?u=0c438bd074b242c5896334e6da1f0801c2f581e4&v=4)](https://github.com/hsm207)[@hsm207](https://github.com/hsm207) [![](https://avatars.githubusercontent.com/u/10937540?u=fcc094d7dfef2d3778c989def06199d9dc84fb61&v=4)](https://github.com/freemso)[@freemso](https://github.com/freemso) [![](https://avatars.githubusercontent.com/u/34411969?u=ae4aac513e377777fd6e46980e0e9414cdcd6f96&v=4)](https://github.com/DayuanJiang)[@DayuanJiang](https://github.com/DayuanJiang) [![](https://avatars.githubusercontent.com/u/7080882?u=f985127fd58fa96b886d591ce104f29f3bd7f81f&v=4)](https://github.com/rigazilla)[@rigazilla](https://github.com/rigazilla) [![](https://avatars.githubusercontent.com/u/4726889?u=1db838ee4066c26d5c0fa02311c7895c36969fb7&v=4)](https://github.com/apepkuss)[@apepkuss](https://github.com/apepkuss) [![](https://avatars.githubusercontent.com/u/69025547?u=97202d8501d38ed5015cfb3c40cf0ba2daeb795c&v=4)](https://github.com/gadhagod)[@gadhagod](https://github.com/gadhagod) [![](https://avatars.githubusercontent.com/u/154643880?u=3792a3c4581984a90f91ab05f720fd3d7b647d5b&v=4)](https://github.com/raveharpaz)[@raveharpaz](https://github.com/raveharpaz) [![](https://avatars.githubusercontent.com/u/91019033?u=30944d2fcb8759eefe2efa26c4d07b218d25ae33&v=4)](https://github.com/matthewdeguzman)[@matthewdeguzman](https://github.com/matthewdeguzman) [![](https://avatars.githubusercontent.com/u/13414571?u=c5490c987e1bcf8d47d7ecc4dca3812a21713f3a&v=4)](https://github.com/Tokkiu)[@Tokkiu](https://github.com/Tokkiu) [![](https://avatars.githubusercontent.com/u/100361543?u=f022d60888add75594372c5e8ebb32fc7fdc2794&v=4)](https://github.com/softboyjimbo)[@softboyjimbo](https://github.com/softboyjimbo) [![](https://avatars.githubusercontent.com/u/96572405?u=7784695f37788fb8048f6ce213bf1df3d4713f2d&v=4)](https://github.com/zhanghexian)[@zhanghexian](https://github.com/zhanghexian) [![](https://avatars.githubusercontent.com/u/117737297?u=0adf0f84cc345cc6e2ca3e4ad3c27a9ca8f53472&v=4)](https://github.com/rajtilakjee)[@rajtilakjee](https://github.com/rajtilakjee) [![](https://avatars.githubusercontent.com/u/1983160?u=536f2558c6ac33b74a6d89520dcb27ba46954070&v=4)](https://github.com/ashvardanian)[@ashvardanian](https://github.com/ashvardanian) [![](https://avatars.githubusercontent.com/u/4983896?u=4a0ba92f5b46b0c805a3c4715748f042a8c769a0&v=4)](https://github.com/plv)[@plv](https://github.com/plv) [![](https://avatars.githubusercontent.com/u/872712?u=c6e76fb451e3a0c1528a8d0e95ef3ed669483690&v=4)](https://github.com/TomTom101)[@TomTom101](https://github.com/TomTom101) [![](https://avatars.githubusercontent.com/u/43986145?u=3d15192e4d6ae36696e49e6c061d29f074f5ba77&v=4)](https://github.com/juliuslipp)[@juliuslipp](https://github.com/juliuslipp) 
[![](https://avatars.githubusercontent.com/u/1078320?u=786a976f97c3b9a75bd7467579d77e303d2acc8d&v=4)](https://github.com/pors)[@pors](https://github.com/pors) [![](https://avatars.githubusercontent.com/u/22906652?u=bee195145bb46c722da707939100f3a5a46fc8b9&v=4)](https://github.com/shivanimodi16)[@shivanimodi16](https://github.com/shivanimodi16) [![](https://avatars.githubusercontent.com/u/11373553?u=cebc40130d1da9f7ac666a2f6237a3c1148f65ef&v=4)](https://github.com/thomas0809)[@thomas0809](https://github.com/thomas0809) [![](https://avatars.githubusercontent.com/u/55012400?u=0a53d356ee0f3babed5fd7b3aec73a9e6b1724e6&v=4)](https://github.com/azamiftikhar1000)[@azamiftikhar1000](https://github.com/azamiftikhar1000) [![](https://avatars.githubusercontent.com/u/135340?v=4)](https://github.com/alecf)[@alecf](https://github.com/alecf) [![](https://avatars.githubusercontent.com/u/6756744?u=f576bd2ad9bb2ebfc8d45feb4a49e8add9ae79dc&v=4)](https://github.com/ecneladis)[@ecneladis](https://github.com/ecneladis) [![](https://avatars.githubusercontent.com/u/72488598?u=98dc24a63369cbae14913caff5f379f80f305aab&v=4)](https://github.com/Undertone0809)[@Undertone0809](https://github.com/Undertone0809) [![](https://avatars.githubusercontent.com/u/45447813?u=6d1f8b455599848e6cd9c2410ba5f4f02d2d368c&v=4)](https://github.com/hetaoBackend)[@hetaoBackend](https://github.com/hetaoBackend) [![](https://avatars.githubusercontent.com/u/1636116?u=617e8ebbd68598aada3a04642e7801c6b1dda152&v=4)](https://github.com/herrjemand)[@herrjemand](https://github.com/herrjemand) [![](https://avatars.githubusercontent.com/u/5798036?u=4eba31d63c3818d17fb8f9aa923599ac63ebfea8&v=4)](https://github.com/lesters)[@lesters](https://github.com/lesters) [![](https://avatars.githubusercontent.com/u/115359769?v=4)](https://github.com/max-arthurai)[@max-arthurai](https://github.com/max-arthurai) [![](https://avatars.githubusercontent.com/u/98474633?u=32ebf212dfc4d68c87f864c7d5bb9967ac85c96e&v=4)](https://github.com/philipkiely-baseten)[@philipkiely-baseten](https://github.com/philipkiely-baseten) [![](https://avatars.githubusercontent.com/u/45048633?v=4)](https://github.com/schadem)[@schadem](https://github.com/schadem) [![](https://avatars.githubusercontent.com/u/127325395?v=4)](https://github.com/Aratako)[@Aratako](https://github.com/Aratako) [![](https://avatars.githubusercontent.com/u/4067380?u=2776e796abeb0dfa8371dd528165ff0d96024a83&v=4)](https://github.com/anubhav94N)[@anubhav94N](https://github.com/anubhav94N) [![](https://avatars.githubusercontent.com/u/81988348?v=4)](https://github.com/rithwik-db)[@rithwik-db](https://github.com/rithwik-db) [![](https://avatars.githubusercontent.com/u/50788154?u=f924ef4e8d2b47be96f7a4b4357d17b6fafaea80&v=4)](https://github.com/kartheekyakkala)[@kartheekyakkala](https://github.com/kartheekyakkala) [![](https://avatars.githubusercontent.com/u/105399924?u=e69e8f1af87a33af3ecbdd5b5d4327c6dc254df6&v=4)](https://github.com/jiayini1119)[@jiayini1119](https://github.com/jiayini1119) [![](https://avatars.githubusercontent.com/u/14010132?u=7b08fe21105fd9835fe7e7c55a2174f2ec4d0a91&v=4)](https://github.com/aayush3011)[@aayush3011](https://github.com/aayush3011) [![](https://avatars.githubusercontent.com/u/11540660?u=efe357bf4cbe05c882528cc3ad78214776b80158&v=4)](https://github.com/shufanhao)[@shufanhao](https://github.com/shufanhao) [![](https://avatars.githubusercontent.com/u/13724617?v=4)](https://github.com/zcgeng)[@zcgeng](https://github.com/zcgeng) 
[![](https://avatars.githubusercontent.com/u/93145909?u=38b3ccf07a613963e9897627f940912128b7a83a&v=4)](https://github.com/ash0ts)[@ash0ts](https://github.com/ash0ts) [![](https://avatars.githubusercontent.com/u/119620994?u=ac3dfad90764c69144f593023fce93080586702e&v=4)](https://github.com/Honkware)[@Honkware](https://github.com/Honkware) [![](https://avatars.githubusercontent.com/u/4524535?u=6a41acd9f233fa9e62294d5534d1f2f52faa6b78&v=4)](https://github.com/dwhitena)[@dwhitena](https://github.com/dwhitena) [![](https://avatars.githubusercontent.com/u/21286981?v=4)](https://github.com/SagarBM396)[@SagarBM396](https://github.com/SagarBM396) [![](https://avatars.githubusercontent.com/u/88007022?u=1d49b0aa10dcff5b6661b211331334c165c56f28&v=4)](https://github.com/jamie256)[@jamie256](https://github.com/jamie256) [![](https://avatars.githubusercontent.com/u/2283778?u=0c5a2a583bc77b138b346c5974551ac459059026&v=4)](https://github.com/yanghua)[@yanghua](https://github.com/yanghua) [![](https://avatars.githubusercontent.com/u/62718109?u=ab38af3009ae3adcff49a309580e55bc6f586ba2&v=4)](https://github.com/klein-t)[@klein-t](https://github.com/klein-t) [![](https://avatars.githubusercontent.com/u/110841617?u=e473cda5a87ca1dae11082c11db9c1ed1f4c7032&v=4)](https://github.com/erika-cardenas)[@erika-cardenas](https://github.com/erika-cardenas) [![](https://avatars.githubusercontent.com/u/13636019?v=4)](https://github.com/Ayan-Bandyopadhyay)[@Ayan-Bandyopadhyay](https://github.com/Ayan-Bandyopadhyay) [![](https://avatars.githubusercontent.com/u/27293258?u=3349429e2b89bb75f144bb22c4015d9b676f3fca&v=4)](https://github.com/tugot17)[@tugot17](https://github.com/tugot17) [![](https://avatars.githubusercontent.com/u/13009163?u=c2b3a11cceaadbc9415f545b971250c9e2b2078b&v=4)](https://github.com/Spartee)[@Spartee](https://github.com/Spartee) [![](https://avatars.githubusercontent.com/u/22459070?u=c541f86a16a5b46ae138a7bf1efdce36dd413f24&v=4)](https://github.com/Jflick58)[@Jflick58](https://github.com/Jflick58) [![](https://avatars.githubusercontent.com/u/20140126?u=d1b9220a46efe488dc3db52e5d92774d85d38dfc&v=4)](https://github.com/JuHyung-Son)[@JuHyung-Son](https://github.com/JuHyung-Son) [![](https://avatars.githubusercontent.com/u/949393?u=66d8768dc44519c956069acd88cfb1b0dca646f8&v=4)](https://github.com/stewartjarod)[@stewartjarod](https://github.com/stewartjarod) [![](https://avatars.githubusercontent.com/u/8279655?v=4)](https://github.com/cxumol)[@cxumol](https://github.com/cxumol) [![](https://avatars.githubusercontent.com/u/31288628?u=acdfcef703b0d07b69e70e32e20130c05a56a549&v=4)](https://github.com/rihardsgravis)[@rihardsgravis](https://github.com/rihardsgravis) [![](https://avatars.githubusercontent.com/u/31483498?u=aa8561cc1055386d7753a7f82bf823bbdbae4919&v=4)](https://github.com/kouroshHakha)[@kouroshHakha](https://github.com/kouroshHakha) [![](https://avatars.githubusercontent.com/u/24364830?u=ae92d85547ad5a3bfe9967ec333c6a1b775d1204&v=4)](https://github.com/ByronHsu)[@ByronHsu](https://github.com/ByronHsu) [![](https://avatars.githubusercontent.com/u/28208564?u=ab938a1030cc6d630609a6d76b1ada65a3009020&v=4)](https://github.com/O-Roma)[@O-Roma](https://github.com/O-Roma) [![](https://avatars.githubusercontent.com/u/808798?u=8a25786f1b28a0ddf171299eee7c14d9e9f2939b&v=4)](https://github.com/rowillia)[@rowillia](https://github.com/rowillia) [![](https://avatars.githubusercontent.com/u/13447955?v=4)](https://github.com/lesterpjy)[@lesterpjy](https://github.com/lesterpjy) 
[![](https://avatars.githubusercontent.com/u/19216250?u=85921f52a4be080e3529d87d3e3e75bf83847b24&v=4)](https://github.com/junefish)[@junefish](https://github.com/junefish) [![](https://avatars.githubusercontent.com/u/107998986?u=70520f8a4ad962c0fc2706649ec401b274681927&v=4)](https://github.com/2jimoo)[@2jimoo](https://github.com/2jimoo) [![](https://avatars.githubusercontent.com/u/55656?u=b9b6aa80966abd617ffed498f3a15b20d3644604&v=4)](https://github.com/petervandenabeele)[@petervandenabeele](https://github.com/petervandenabeele) [![](https://avatars.githubusercontent.com/u/17451563?v=4)](https://github.com/shahrin014)[@shahrin014](https://github.com/shahrin014) [![](https://avatars.githubusercontent.com/u/3849275?u=5de71c0b6eaea94c0460c1dc18a1a346168f8720&v=4)](https://github.com/shoelsch)[@shoelsch](https://github.com/shoelsch) [![](https://avatars.githubusercontent.com/u/45851384?u=c9c158b6040b1fd8ae5543bad513260e157d5892&v=4)](https://github.com/h0rv)[@h0rv](https://github.com/h0rv) [![](https://avatars.githubusercontent.com/u/19825685?u=c9346281a8534aeaf9f112c0f7ca749de5cb8e23&v=4)](https://github.com/JoanFM)[@JoanFM](https://github.com/JoanFM) [![](https://avatars.githubusercontent.com/u/18037290?u=73f09eb601032e6ff84af14ab80ac8c8c9cebff3&v=4)](https://github.com/asai95)[@asai95](https://github.com/asai95) [![](https://avatars.githubusercontent.com/u/3195154?u=baa3820b95103662bc2aca01959e41aa651764b5&v=4)](https://github.com/mgoin)[@mgoin](https://github.com/mgoin) [![](https://avatars.githubusercontent.com/u/23445657?u=84dda94e9330c5538ea94099b5cae699c88586f8&v=4)](https://github.com/Blaizzy)[@Blaizzy](https://github.com/Blaizzy) [![](https://avatars.githubusercontent.com/u/38002468?u=dd6ba12322fa2ee0d88e83a3773c8abc13ec37af&v=4)](https://github.com/akmhmgc)[@akmhmgc](https://github.com/akmhmgc) [![](https://avatars.githubusercontent.com/u/4693180?u=8cf781d9099d6e2f2d2caf7612a5c2811ba13ef8&v=4)](https://github.com/gmpetrov)[@gmpetrov](https://github.com/gmpetrov) [![](https://avatars.githubusercontent.com/u/29749331?u=a7f4d7db2faa6af42af8d43b2737b5547d36154d&v=4)](https://github.com/aarnphm)[@aarnphm](https://github.com/aarnphm) [![](https://avatars.githubusercontent.com/u/43019056?u=9066bb1f7b39a46309c387650c0ce5b7423f79da&v=4)](https://github.com/aMahanna)[@aMahanna](https://github.com/aMahanna) [![](https://avatars.githubusercontent.com/u/39014459?v=4)](https://github.com/hp0404)[@hp0404](https://github.com/hp0404) [![](https://avatars.githubusercontent.com/u/2098020?u=0e1ecc0cc5eab98d93c0eaa7e210a1de937d95d9&v=4)](https://github.com/liushuaikobe)[@liushuaikobe](https://github.com/liushuaikobe) [![](https://avatars.githubusercontent.com/u/115371133?u=a032d8cc4a47b9a25bc7a1699a73506bdb752ea2&v=4)](https://github.com/fserv)[@fserv](https://github.com/fserv) [![](https://avatars.githubusercontent.com/u/5289083?u=d663551cd0b6e74091abd6272c35c9e02e82d6c0&v=4)](https://github.com/seanmavley)[@seanmavley](https://github.com/seanmavley) [![](https://avatars.githubusercontent.com/u/37284105?u=be61bf8a5cef1060aeeb63a9bdd0a18f2edfe8d1&v=4)](https://github.com/cloudscool)[@cloudscool](https://github.com/cloudscool) [![](https://avatars.githubusercontent.com/u/243665?u=4f7f2b3bbc666f530bf0e61bf6a4b32f5fcec433&v=4)](https://github.com/Lothiraldan)[@Lothiraldan](https://github.com/Lothiraldan) [![](https://avatars.githubusercontent.com/u/2106106?u=e59f1d37d627161dc1739d290d1aedfb7348f1ab&v=4)](https://github.com/Ather23)[@Ather23](https://github.com/Ather23) 
[![](https://avatars.githubusercontent.com/u/143642606?u=83091119b6b84c82b741298e9c9252161868bae7&v=4)](https://github.com/mogith-pn)[@mogith-pn](https://github.com/mogith-pn) [![](https://avatars.githubusercontent.com/u/6266815?v=4)](https://github.com/JohnnyDeuss)[@JohnnyDeuss](https://github.com/JohnnyDeuss) [![](https://avatars.githubusercontent.com/u/43149077?u=26d40f875b701db58f54af0441501c12e86dec6f&v=4)](https://github.com/dakinggg)[@dakinggg](https://github.com/dakinggg) [![](https://avatars.githubusercontent.com/u/32113413?u=069f880e88a96db6ad955e3cc9fc7f9dfcf2beef&v=4)](https://github.com/jackwotherspoon)[@jackwotherspoon](https://github.com/jackwotherspoon) [![](https://avatars.githubusercontent.com/u/4492530?u=142efae122e461996caa5cc6d41b9b5f0549c047&v=4)](https://github.com/philippe2803)[@philippe2803](https://github.com/philippe2803) [![](https://avatars.githubusercontent.com/u/2644049?v=4)](https://github.com/wnleao)[@wnleao](https://github.com/wnleao) [![](https://avatars.githubusercontent.com/u/73353463?u=b07dac98e10a359f1a21dc08e61144e3671ca22f&v=4)](https://github.com/hmasdev)[@hmasdev](https://github.com/hmasdev) [![](https://avatars.githubusercontent.com/u/99611484?u=f421fe8a2917ae3ea24d83f056646055a00d3174&v=4)](https://github.com/kdcokenny)[@kdcokenny](https://github.com/kdcokenny) [![](https://avatars.githubusercontent.com/u/3761730?u=16424feb9e18fc01df9d2c58699454f3016e79db&v=4)](https://github.com/qtangs)[@qtangs](https://github.com/qtangs) [![](https://avatars.githubusercontent.com/u/1651790?u=5a5ea37c495f7787f35172f0f86569daf5a5a65e&v=4)](https://github.com/wey-gu)[@wey-gu](https://github.com/wey-gu) [![](https://avatars.githubusercontent.com/u/57228345?v=4)](https://github.com/CahidArda)[@CahidArda](https://github.com/CahidArda) [![](https://avatars.githubusercontent.com/u/54905519?u=9818cccb258351fd0abec07b4acfb414a0383823&v=4)](https://github.com/Sukitly)[@Sukitly](https://github.com/Sukitly) [![](https://avatars.githubusercontent.com/u/2951285?u=571c795227b4edbd29f027478346834f83a95076&v=4)](https://github.com/samber)[@samber](https://github.com/samber) [![](https://avatars.githubusercontent.com/u/601530?u=ab242d6500886c4f8799101543d5b1f7841f1104&v=4)](https://github.com/Atry)[@Atry](https://github.com/Atry) [![](https://avatars.githubusercontent.com/u/2700370?u=421c7cd75c8f7f1a28e6f6c19a5d587a6d478ed0&v=4)](https://github.com/chosh0615)[@chosh0615](https://github.com/chosh0615) [![](https://avatars.githubusercontent.com/u/3009596?u=bbc154ae159c938e6e0c4045dc1b7980696b402a&v=4)](https://github.com/avsolatorio)[@avsolatorio](https://github.com/avsolatorio) [![](https://avatars.githubusercontent.com/u/90301759?v=4)](https://github.com/19374242)[@19374242](https://github.com/19374242) [![](https://avatars.githubusercontent.com/u/4491983?u=9265a9310ce2fa08b9429dc5d68da5b8677058ba&v=4)](https://github.com/leedotpang)[@leedotpang](https://github.com/leedotpang) [![](https://avatars.githubusercontent.com/u/39889?u=bd28816c18beaddc4da762d61d842547fdb271d9&v=4)](https://github.com/yarikoptic)[@yarikoptic](https://github.com/yarikoptic) [![](https://avatars.githubusercontent.com/u/52778543?u=504d8eb452ab2103a86ab469dd793eab49c8a437&v=4)](https://github.com/Jofthomas)[@Jofthomas](https://github.com/Jofthomas) [![](https://avatars.githubusercontent.com/u/57748216?u=e2029e1262ee9c9d9f5825b2d28952758a628f28&v=4)](https://github.com/marlenezw)[@marlenezw](https://github.com/marlenezw) 
[![](https://avatars.githubusercontent.com/u/23070692?u=bc8389d4c965994dee5b8cbadc420f8b4bcd5f0b&v=4)](https://github.com/rancomp)[@rancomp](https://github.com/rancomp) [![](https://avatars.githubusercontent.com/u/1540803?v=4)](https://github.com/morganda)[@morganda](https://github.com/morganda) [![](https://avatars.githubusercontent.com/u/1302641?u=643198eed0646ee2e18e22d6b6dab509bf9b2505&v=4)](https://github.com/atroyn)[@atroyn](https://github.com/atroyn) [![](https://avatars.githubusercontent.com/u/48685774?v=4)](https://github.com/dmenini)[@dmenini](https://github.com/dmenini) [![](https://avatars.githubusercontent.com/u/987457?u=a0dcd7b2cac59237d1ac2b43ca67a328ea7c437a&v=4)](https://github.com/brotchie)[@brotchie](https://github.com/brotchie) [![](https://avatars.githubusercontent.com/u/32129522?u=a6fc430ee58b3ebe776dec5fce16b686f81c8e12&v=4)](https://github.com/angeligareta)[@angeligareta](https://github.com/angeligareta) [![](https://avatars.githubusercontent.com/u/5279578?u=ce483437f50a425eab4b1f6f635ac49159f31576&v=4)](https://github.com/mmajewsk)[@mmajewsk](https://github.com/mmajewsk) [![](https://avatars.githubusercontent.com/u/3480154?u=f69c138e15366ba9c15cafd3c753a7ba7da44ad5&v=4)](https://github.com/wangwei1237)[@wangwei1237](https://github.com/wangwei1237) [![](https://avatars.githubusercontent.com/u/116048415?v=4)](https://github.com/nimimeht)[@nimimeht](https://github.com/nimimeht) [![](https://avatars.githubusercontent.com/u/5055697?v=4)](https://github.com/alexiri)[@alexiri](https://github.com/alexiri) [![](https://avatars.githubusercontent.com/u/12781611?v=4)](https://github.com/rjanardhan3)[@rjanardhan3](https://github.com/rjanardhan3) [![](https://avatars.githubusercontent.com/u/136875?u=611195240df6f68e816214bb865174384b74437e&v=4)](https://github.com/msaelices)[@msaelices](https://github.com/msaelices) [![](https://avatars.githubusercontent.com/u/21985684?u=96e4830f5dfb5a4a6fcb504fddec997a50b56413&v=4)](https://github.com/SimFG)[@SimFG](https://github.com/SimFG) [![](https://avatars.githubusercontent.com/u/16047967?v=4)](https://github.com/StankoKuveljic)[@StankoKuveljic](https://github.com/StankoKuveljic) [![](https://avatars.githubusercontent.com/u/40655746?u=3c10115601fd5b032c3f274e79fd68dc5bb03921&v=4)](https://github.com/quchuyuan)[@quchuyuan](https://github.com/quchuyuan) [![](https://avatars.githubusercontent.com/u/82044803?u=451c2955f0862cccf64cac30e062570d208d6903&v=4)](https://github.com/serena-ruan)[@serena-ruan](https://github.com/serena-ruan) [![](https://avatars.githubusercontent.com/u/151817113?v=4)](https://github.com/sirjan-ws-ext)[@sirjan-ws-ext](https://github.com/sirjan-ws-ext) [![](https://avatars.githubusercontent.com/u/147840?v=4)](https://github.com/anentropic)[@anentropic](https://github.com/anentropic) [![](https://avatars.githubusercontent.com/u/65639964?u=6a48b9ecb8e188fee4117bffb055afb54566ba97&v=4)](https://github.com/EricLiclair)[@EricLiclair](https://github.com/EricLiclair) [![](https://avatars.githubusercontent.com/u/23413676?u=b5bef760f9d067457f460d4dd5036f7e5f50d197&v=4)](https://github.com/hsuyuming)[@hsuyuming](https://github.com/hsuyuming) [![](https://avatars.githubusercontent.com/u/1751809?u=b247b34fa5ccf9bb276ae318d57af47680994600&v=4)](https://github.com/asofter)[@asofter](https://github.com/asofter) [![](https://avatars.githubusercontent.com/u/16456186?u=b9b30585eb3ddd0c8819bda9694636303c510233&v=4)](https://github.com/ThatsJustCheesy)[@ThatsJustCheesy](https://github.com/ThatsJustCheesy) 
[![](https://avatars.githubusercontent.com/u/7942293?u=6d5e295620df234b697f25d94659ae85d2dd2060&v=4)](https://github.com/imeckr)[@imeckr](https://github.com/imeckr) [![](https://avatars.githubusercontent.com/u/7935430?v=4)](https://github.com/rc19)[@rc19](https://github.com/rc19) [![](https://avatars.githubusercontent.com/u/3982077?u=8bbebac42cb84a25c629f83f212b2d099ffa3964&v=4)](https://github.com/anthonychu)[@anthonychu](https://github.com/anthonychu) [![](https://avatars.githubusercontent.com/u/1664952?u=38196f73e9e69e2cc4f6d2e1207647af87bc440a&v=4)](https://github.com/h3l)[@h3l](https://github.com/h3l) [![](https://avatars.githubusercontent.com/u/6726111?u=57f5f48085f552366bc8cf19ecd1d4ad0c66cd48&v=4)](https://github.com/JensMadsen)[@JensMadsen](https://github.com/JensMadsen) [![](https://avatars.githubusercontent.com/u/17705063?v=4)](https://github.com/Raj725)[@Raj725](https://github.com/Raj725) [![](https://avatars.githubusercontent.com/u/61808204?v=4)](https://github.com/akiradev0x)[@akiradev0x](https://github.com/akiradev0x) [![](https://avatars.githubusercontent.com/u/160063452?v=4)](https://github.com/fzowl)[@fzowl](https://github.com/fzowl) [![](https://avatars.githubusercontent.com/u/5136688?u=471ef01a31cc054f84abbe1b9e77ce07b2ac6853&v=4)](https://github.com/mlejva)[@mlejva](https://github.com/mlejva) [![](https://avatars.githubusercontent.com/u/5564852?u=bb4393ab0f6ea892733e5fa10294207c1cf157f7&v=4)](https://github.com/msetbar)[@msetbar](https://github.com/msetbar) [![](https://avatars.githubusercontent.com/u/841146?v=4)](https://github.com/DaveDeCaprio)[@DaveDeCaprio](https://github.com/DaveDeCaprio) [![](https://avatars.githubusercontent.com/u/120141355?u=c114874e969ef4e38c54d042fe1b9a69bc634483&v=4)](https://github.com/j-space-b)[@j-space-b](https://github.com/j-space-b) [![](https://avatars.githubusercontent.com/u/50950969?u=f0c166782c1b8f63eb983383729b5d109d7bed0a&v=4)](https://github.com/chrispy-snps)[@chrispy-snps](https://github.com/chrispy-snps) [![](https://avatars.githubusercontent.com/u/1863868?u=b00a9408d1433919780ea3248b3fc21258172152&v=4)](https://github.com/amosjyng)[@amosjyng](https://github.com/amosjyng) [![](https://avatars.githubusercontent.com/u/33070862?v=4)](https://github.com/cwlacewe)[@cwlacewe](https://github.com/cwlacewe) [![](https://avatars.githubusercontent.com/u/38786?u=10a7cbcfb424bf45b3858017dc8cffae82adde29&v=4)](https://github.com/ninjapenguin)[@ninjapenguin](https://github.com/ninjapenguin) [![](https://avatars.githubusercontent.com/u/12752197?u=f4f5d6c5b040422eaa987d0c7f441c65a1266db5&v=4)](https://github.com/dvonthenen)[@dvonthenen](https://github.com/dvonthenen) [![](https://avatars.githubusercontent.com/u/56083056?v=4)](https://github.com/HamJaw1432)[@HamJaw1432](https://github.com/HamJaw1432) [![](https://avatars.githubusercontent.com/u/538203?u=b3a13cce34acb23a3ef2808ee54c3461f2fa85bb&v=4)](https://github.com/cristobalcl)[@cristobalcl](https://github.com/cristobalcl) [![](https://avatars.githubusercontent.com/u/17561003?u=76de0b85da74806eaad024ebc3315201ba49e867&v=4)](https://github.com/krrishdholakia)[@krrishdholakia](https://github.com/krrishdholakia) [![](https://avatars.githubusercontent.com/u/27777173?u=4490be52549d8b6d2a662f35068b9a0d625b4b66&v=4)](https://github.com/samhita-alla)[@samhita-alla](https://github.com/samhita-alla) [![](https://avatars.githubusercontent.com/u/3906177?u=3e7cb909eded61c3a35cb0e11336a70d0bc05534&v=4)](https://github.com/ralewis85)[@ralewis85](https://github.com/ralewis85) 
[![](https://avatars.githubusercontent.com/u/6785029?v=4)](https://github.com/finnless)[@finnless](https://github.com/finnless) [![](https://avatars.githubusercontent.com/u/45704090?u=fe471820f7f3939783ddea78efa0ef1f0d86288e&v=4)](https://github.com/felixocker)[@felixocker](https://github.com/felixocker) [![](https://avatars.githubusercontent.com/u/433221?u=714ae935eadb460e1a7d41d7d29e26c7fed0bbbf&v=4)](https://github.com/brendancol)[@brendancol](https://github.com/brendancol) [![](https://avatars.githubusercontent.com/u/22055188?u=779840a35ef12f6734b630b1bdedd694132ec68f&v=4)](https://github.com/juliensalinas)[@juliensalinas](https://github.com/juliensalinas) [![](https://avatars.githubusercontent.com/u/69706702?u=4fe850984b0956793de0a67c7ed9141168942eef&v=4)](https://github.com/muntaqamahmood)[@muntaqamahmood](https://github.com/muntaqamahmood) [![](https://avatars.githubusercontent.com/u/11441526?u=bbd26dd43cf43212b0b05601ed5aaf29727f5d9f&v=4)](https://github.com/Fei-Wang)[@Fei-Wang](https://github.com/Fei-Wang) [![](https://avatars.githubusercontent.com/u/45267439?u=d2ad5da7ef06e928644321e7a1cfd16842a897db&v=4)](https://github.com/jupyterjazz)[@jupyterjazz](https://github.com/jupyterjazz) [![](https://avatars.githubusercontent.com/u/17061663?u=bee0295d999ddb902a98872fac6009bb88950132&v=4)](https://github.com/kooyunmo)[@kooyunmo](https://github.com/kooyunmo) [![](https://avatars.githubusercontent.com/u/7340008?u=9473b1cdea8b9929771b32f14a28ad702237900c&v=4)](https://github.com/donbr)[@donbr](https://github.com/donbr) [![](https://avatars.githubusercontent.com/u/22361806?u=c6b2eec689b859aeb182654e5e67936886d860bb&v=4)](https://github.com/jdogmcsteezy)[@jdogmcsteezy](https://github.com/jdogmcsteezy) [![](https://avatars.githubusercontent.com/u/367522?u=2b439b16d48aaea7f17d1b3b0b24a9cb0b8712ed&v=4)](https://github.com/borisdev)[@borisdev](https://github.com/borisdev) [![](https://avatars.githubusercontent.com/u/14931371?u=2f570f7591396a1ab8b58777746e2412e154fbfa&v=4)](https://github.com/jasonwcfan)[@jasonwcfan](https://github.com/jasonwcfan) [![](https://avatars.githubusercontent.com/u/31998003?u=0d91cde56e2c25d8ee7447bc55099e3dad047e99&v=4)](https://github.com/kristapratico)[@kristapratico](https://github.com/kristapratico) [![](https://avatars.githubusercontent.com/u/46003469?u=4f64d04035d962af0f72d20bffd6ea61635e728e&v=4)](https://github.com/yilmaz-burak)[@yilmaz-burak](https://github.com/yilmaz-burak) [![](https://avatars.githubusercontent.com/u/8552242?v=4)](https://github.com/yessenzhar)[@yessenzhar](https://github.com/yessenzhar) [![](https://avatars.githubusercontent.com/u/84070455?v=4)](https://github.com/pjb157)[@pjb157](https://github.com/pjb157) [![](https://avatars.githubusercontent.com/u/202907?u=a1060b9fd298fd84b1adb7f6874c5c2012e782dc&v=4)](https://github.com/krasserm)[@krasserm](https://github.com/krasserm) [![](https://avatars.githubusercontent.com/u/8673939?v=4)](https://github.com/NickL77)[@NickL77](https://github.com/NickL77) [![](https://avatars.githubusercontent.com/u/10400064?u=581d97314df325c15ec221f64834003d3bba5cc1&v=4)](https://github.com/mishushakov)[@mishushakov](https://github.com/mishushakov) [![](https://avatars.githubusercontent.com/u/1508364?u=e75aca2de6de1a1e57329fc0c6430e1341904318&v=4)](https://github.com/flash1293)[@flash1293](https://github.com/flash1293) [![](https://avatars.githubusercontent.com/u/6500104?u=c11cdf2671e89749d7d8c01f0d85494cce8d9f84&v=4)](https://github.com/Code-Hex)[@Code-Hex](https://github.com/Code-Hex) 
[![](https://avatars.githubusercontent.com/u/22690160?u=50f2d8aa99bd7b12c01df29e8ffe519ed1cff1d5&v=4)](https://github.com/jnis23)[@jnis23](https://github.com/jnis23) [![](https://avatars.githubusercontent.com/u/36752715?u=5137581b52bcbb8466b394f3ba40f97f9e273f52&v=4)](https://github.com/cgalo5758)[@cgalo5758](https://github.com/cgalo5758) [![](https://avatars.githubusercontent.com/u/17325195?u=dadc287a6784258704affce9bf91e03e1bb967b4&v=4)](https://github.com/raymond-yuan)[@raymond-yuan](https://github.com/raymond-yuan) [![](https://avatars.githubusercontent.com/u/101966044?v=4)](https://github.com/klae01)[@klae01](https://github.com/klae01) [![](https://avatars.githubusercontent.com/u/38317983?u=b169467874aeaf478132e46998ca895accfc008e&v=4)](https://github.com/LunarECL)[@LunarECL](https://github.com/LunarECL) [![](https://avatars.githubusercontent.com/u/12080578?v=4)](https://github.com/whiskyboy)[@whiskyboy](https://github.com/whiskyboy) [![](https://avatars.githubusercontent.com/u/66191792?v=4)](https://github.com/yuskhan)[@yuskhan](https://github.com/yuskhan) [![](https://avatars.githubusercontent.com/u/62583018?u=965202caa3cfc09516af257f0affdf4aae7cdd43&v=4)](https://github.com/akashAD98)[@akashAD98](https://github.com/akashAD98) [![](https://avatars.githubusercontent.com/u/45953733?u=b907b96d62f8cb2e75f3bba4f137d296d0d8a87f&v=4)](https://github.com/Shrined)[@Shrined](https://github.com/Shrined) [![](https://avatars.githubusercontent.com/u/17435126?u=62bec61ef256194a3bb3ab238ab71d1792decd08&v=4)](https://github.com/DavidLMS)[@DavidLMS](https://github.com/DavidLMS) [![](https://avatars.githubusercontent.com/u/4956442?u=fee6c76ff991cc9c12c4d703a1ad007e7634f58e&v=4)](https://github.com/rmkraus)[@rmkraus](https://github.com/rmkraus) [![](https://avatars.githubusercontent.com/u/20266953?u=32853a0ed47a83525f3f21b4baf63891e0e3de15&v=4)](https://github.com/rawwar)[@rawwar](https://github.com/rawwar) [![](https://avatars.githubusercontent.com/u/413669?u=25b5563194493db00c227a98e23f460adb13c9ea&v=4)](https://github.com/pmcfadin)[@pmcfadin](https://github.com/pmcfadin) [![](https://avatars.githubusercontent.com/u/25740077?u=1c3b2b59a52f332dc22ef1787f2cdc67dc9fea5e&v=4)](https://github.com/tricktreat)[@tricktreat](https://github.com/tricktreat) [![](https://avatars.githubusercontent.com/u/6334158?u=1d02d8cc173b20c7d18e11ac20a6f40081025fc3&v=4)](https://github.com/fzliu)[@fzliu](https://github.com/fzliu) [![](https://avatars.githubusercontent.com/u/15992114?u=39c8ea0ffb9f48cec04f9b473f2801327e716ba1&v=4)](https://github.com/dongreenberg)[@dongreenberg](https://github.com/dongreenberg) [![](https://avatars.githubusercontent.com/u/54540938?u=77dbfd10b709e203865f99668a4c79db04a69661&v=4)](https://github.com/aledelunap)[@aledelunap](https://github.com/aledelunap) [![](https://avatars.githubusercontent.com/u/1155052?v=4)](https://github.com/stonekim)[@stonekim](https://github.com/stonekim) [![](https://avatars.githubusercontent.com/u/6690727?u=d5742c8e658fe211a8987d9716838c34122485d0&v=4)](https://github.com/tonyabracadabra)[@tonyabracadabra](https://github.com/tonyabracadabra) [![](https://avatars.githubusercontent.com/u/2857712?u=6809bef8bf07c46b39cd2fcd6027ed86e76372cd&v=4)](https://github.com/machulav)[@machulav](https://github.com/machulav) [![](https://avatars.githubusercontent.com/u/12604876?u=a441926ef7f4dbc48fc3a1511f3ae5cb4279c464&v=4)](https://github.com/shauryr)[@shauryr](https://github.com/shauryr) 
[![](https://avatars.githubusercontent.com/u/42373772?v=4)](https://github.com/PawelFaron)[@PawelFaron](https://github.com/PawelFaron) [![](https://avatars.githubusercontent.com/u/104267837?u=762d6b00291c68379d66260d7b644942e3bab891&v=4)](https://github.com/lvliang-intel)[@lvliang-intel](https://github.com/lvliang-intel) [![](https://avatars.githubusercontent.com/u/8972416?u=8cef7c30a819e5157bece1f1e06a50beab52845f&v=4)](https://github.com/xinqiu)[@xinqiu](https://github.com/xinqiu) [![](https://avatars.githubusercontent.com/u/30035387?u=38717fe5778531ee96e5fc6e4a350668b5024d1c&v=4)](https://github.com/MikeMcGarry)[@MikeMcGarry](https://github.com/MikeMcGarry) [![](https://avatars.githubusercontent.com/u/20807672?u=f2efe9788ce26442bb3319da1a56081d64c359e5&v=4)](https://github.com/robcaulk)[@robcaulk](https://github.com/robcaulk) [![](https://avatars.githubusercontent.com/u/37783831?u=5697294c9a0c5bcca4df1aafd22cf8ab64081f2f&v=4)](https://github.com/jagilley)[@jagilley](https://github.com/jagilley) [![](https://avatars.githubusercontent.com/u/35005448?u=4b6efd3d2dcdc2acde843cff4183b59087f35a9b&v=4)](https://github.com/prrao87)[@prrao87](https://github.com/prrao87) [![](https://avatars.githubusercontent.com/u/31956487?u=4693ce4d533d97386b62851f6790881306cb88bc&v=4)](https://github.com/lujingxuansc)[@lujingxuansc](https://github.com/lujingxuansc) [![](https://avatars.githubusercontent.com/u/15329913?u=d6a01e3a63eb3ef04e5917f994fc2f809f28dd13&v=4)](https://github.com/mplachter)[@mplachter](https://github.com/mplachter) [![](https://avatars.githubusercontent.com/u/46458320?u=f752991f6c37b213ad11fdae5bf7820aa59b93d0&v=4)](https://github.com/jvelezmagic)[@jvelezmagic](https://github.com/jvelezmagic) [![](https://avatars.githubusercontent.com/u/50772274?u=5d63cb1b53e5702ea3dd12f865c3b9b252f37a02&v=4)](https://github.com/patrickloeber)[@patrickloeber](https://github.com/patrickloeber) [![](https://avatars.githubusercontent.com/u/16231195?u=cb98dd7c537280ed31b53108f31286bd50989aea&v=4)](https://github.com/trancethehuman)[@trancethehuman](https://github.com/trancethehuman) [![](https://avatars.githubusercontent.com/u/68764?v=4)](https://github.com/vadimgu)[@vadimgu](https://github.com/vadimgu) [![](https://avatars.githubusercontent.com/u/146365078?v=4)](https://github.com/hulitaitai)[@hulitaitai](https://github.com/hulitaitai) [![](https://avatars.githubusercontent.com/u/6885889?u=0b15031859ad908eb11af83878000ab09bed5609&v=4)](https://github.com/cjcjameson)[@cjcjameson](https://github.com/cjcjameson) [![](https://avatars.githubusercontent.com/u/69208727?u=132c8ca18143866b79253a6fcbc10f58984f61ab&v=4)](https://github.com/aymeric-roucher)[@aymeric-roucher](https://github.com/aymeric-roucher) [![](https://avatars.githubusercontent.com/u/24295927?u=27eee7ea85bd7dfd9e918245b96de8c757f5a620&v=4)](https://github.com/Sandy247)[@Sandy247](https://github.com/Sandy247) [![](https://avatars.githubusercontent.com/u/3887295?u=55c8b3263df68b67f9b465c1758c78898f8b163b&v=4)](https://github.com/zoltan-fedor)[@zoltan-fedor](https://github.com/zoltan-fedor) [![](https://avatars.githubusercontent.com/u/160584887?v=4)](https://github.com/miri-bar)[@miri-bar](https://github.com/miri-bar) [![](https://avatars.githubusercontent.com/u/19657350?u=9847c9919a636e9d7022803e829ffd80008cb2d3&v=4)](https://github.com/berkedilekoglu)[@berkedilekoglu](https://github.com/berkedilekoglu) [![](https://avatars.githubusercontent.com/u/55082429?v=4)](https://github.com/maang-h)[@maang-h](https://github.com/maang-h) 
[![](https://avatars.githubusercontent.com/u/141281053?u=e3ff32e9ae51ff0cca84b482fc1e6c80c28ab0c6&v=4)](https://github.com/rodrigo-clickup)[@rodrigo-clickup](https://github.com/rodrigo-clickup) [![](https://avatars.githubusercontent.com/u/35718120?u=af59f3ac14a23d1f2e09942415ac07c10f3a3d05&v=4)](https://github.com/numb3r3)[@numb3r3](https://github.com/numb3r3) [![](https://avatars.githubusercontent.com/u/42609308?u=3f7f530d338e33205815639ad3dfe7c244455728&v=4)](https://github.com/svdeepak99)[@svdeepak99](https://github.com/svdeepak99) [![](https://avatars.githubusercontent.com/u/97558871?v=4)](https://github.com/ZyeG)[@ZyeG](https://github.com/ZyeG) [![](https://avatars.githubusercontent.com/u/30483654?u=95e2c59c64c99e4ba77cffb8b2c180f7b44c6a74&v=4)](https://github.com/NoahStapp)[@NoahStapp](https://github.com/NoahStapp) [![](https://avatars.githubusercontent.com/u/709022?v=4)](https://github.com/tconkling)[@tconkling](https://github.com/tconkling) [![](https://avatars.githubusercontent.com/u/986859?u=54d240cfd5355bb0cfdaf4ac0a9589963ae9ccab&v=4)](https://github.com/toshish)[@toshish](https://github.com/toshish) [![](https://avatars.githubusercontent.com/u/1087039?u=4439c00ef507bef0a99d82cdec33d6d0ed53d67c&v=4)](https://github.com/dremeika)[@dremeika](https://github.com/dremeika) [![](https://avatars.githubusercontent.com/u/49049296?u=26427e6e1aa0a8ac20cc10594664b59a017f5287&v=4)](https://github.com/mingkang111)[@mingkang111](https://github.com/mingkang111) [![](https://avatars.githubusercontent.com/u/13622183?u=c23256501191447d645cc03c1f6bc83282ef1498&v=4)](https://github.com/liaokongVFX)[@liaokongVFX](https://github.com/liaokongVFX) [![](https://avatars.githubusercontent.com/u/36044389?u=e669016609aeb3e08e4f2a50f4faa163d633c073&v=4)](https://github.com/0xRaduan)[@0xRaduan](https://github.com/0xRaduan) [![](https://avatars.githubusercontent.com/u/127370261?v=4)](https://github.com/apeng-singlestore)[@apeng-singlestore](https://github.com/apeng-singlestore) [![](https://avatars.githubusercontent.com/u/252377?v=4)](https://github.com/jeffkit)[@jeffkit](https://github.com/jeffkit) [![](https://avatars.githubusercontent.com/u/158216624?v=4)](https://github.com/xsai9101)[@xsai9101](https://github.com/xsai9101) [![](https://avatars.githubusercontent.com/u/38943595?v=4)](https://github.com/issam9)[@issam9](https://github.com/issam9) [![](https://avatars.githubusercontent.com/u/56953648?v=4)](https://github.com/Dobiichi-Origami)[@Dobiichi-Origami](https://github.com/Dobiichi-Origami) [![](https://avatars.githubusercontent.com/u/131272471?v=4)](https://github.com/CogniJT)[@CogniJT](https://github.com/CogniJT) [![](https://avatars.githubusercontent.com/u/87355704?u=e98091da04c6bfe9af8d982938556832f03fb1fb&v=4)](https://github.com/ivyas21)[@ivyas21](https://github.com/ivyas21) [![](https://avatars.githubusercontent.com/u/90619575?u=a99d480b1238cfdb2dabcd2fe60d1110518049d9&v=4)](https://github.com/florian-morel22)[@florian-morel22](https://github.com/florian-morel22) [![](https://avatars.githubusercontent.com/u/22898443?u=4e6aceb9132747788c4b6aca6c16027ee1109b01&v=4)](https://github.com/sdan)[@sdan](https://github.com/sdan) [![](https://avatars.githubusercontent.com/u/16283396?v=4)](https://github.com/samching)[@samching](https://github.com/samching) [![](https://avatars.githubusercontent.com/u/306671?u=27f910f1bdcdf18622fcccc138274be885cf1058&v=4)](https://github.com/lukestanley)[@lukestanley](https://github.com/lukestanley) 
[![](https://avatars.githubusercontent.com/u/63134180?v=4)](https://github.com/IlyaKIS1)[@IlyaKIS1](https://github.com/IlyaKIS1) [![](https://avatars.githubusercontent.com/u/4432788?u=6883ca123ef6ea5c06b6353183e4f92574b4e152&v=4)](https://github.com/dosuken123)[@dosuken123](https://github.com/dosuken123) [![](https://avatars.githubusercontent.com/u/356014?u=51c0f2becf914c1cb7fce2d2f184a9d0ae89eae7&v=4)](https://github.com/wietsevenema)[@wietsevenema](https://github.com/wietsevenema) [![](https://avatars.githubusercontent.com/u/157405112?u=f34aa80161ad2eab0db9255661f4bd7d685cbd0c&v=4)](https://github.com/gustavo-yt)[@gustavo-yt](https://github.com/gustavo-yt) [![](https://avatars.githubusercontent.com/u/93204286?u=4b965586800fef342c6235fec47e9185b8ec1f81&v=4)](https://github.com/jonathanalgar)[@jonathanalgar](https://github.com/jonathanalgar) [![](https://avatars.githubusercontent.com/u/28803103?u=c0b795ec14b5536f0e757faf1eca1c1900d1ef3c&v=4)](https://github.com/vsxd)[@vsxd](https://github.com/vsxd) [![](https://avatars.githubusercontent.com/u/17221195?u=6182ec534d25d1c9ffe1667bd78ea28fd0eea4c8&v=4)](https://github.com/var77)[@var77](https://github.com/var77) [![](https://avatars.githubusercontent.com/u/54343137?u=0b69859aa8f8e5145d6fda66985a5c8a82c77524&v=4)](https://github.com/L-cloud)[@L-cloud](https://github.com/L-cloud) [![](https://avatars.githubusercontent.com/u/88005863?v=4)](https://github.com/matiasjacob25)[@matiasjacob25](https://github.com/matiasjacob25) [![](https://avatars.githubusercontent.com/u/1222232?v=4)](https://github.com/IlyaMichlin)[@IlyaMichlin](https://github.com/IlyaMichlin) [![](https://avatars.githubusercontent.com/u/6346981?u=8ae43f7d588ffcc184df5948d2d034cc29dc1d7d&v=4)](https://github.com/dzmitry-kankalovich)[@dzmitry-kankalovich](https://github.com/dzmitry-kankalovich) [![](https://avatars.githubusercontent.com/u/13366849?u=9f66646c23def822aac7d3dfecb49369bc8cdf7b&v=4)](https://github.com/EniasCailliau)[@EniasCailliau](https://github.com/EniasCailliau) [![](https://avatars.githubusercontent.com/u/68635?u=0ebec81cc881b2428e2c45e549a1081e5fe3cddf&v=4)](https://github.com/kreneskyp)[@kreneskyp](https://github.com/kreneskyp) [![](https://avatars.githubusercontent.com/u/4441850?u=532666e949309d38a33cda7b1e8b5f30fee0ef7c&v=4)](https://github.com/rsharath)[@rsharath](https://github.com/rsharath) [![](https://avatars.githubusercontent.com/u/21039333?u=bba2c2d18d3a5ef41360778a7679662565f326d2&v=4)](https://github.com/izapolsk)[@izapolsk](https://github.com/izapolsk) [![](https://avatars.githubusercontent.com/u/30639818?v=4)](https://github.com/rjadr)[@rjadr](https://github.com/rjadr) [![](https://avatars.githubusercontent.com/u/17973367?u=135d566bd1e620e230b94bf5252acea571ba510f&v=4)](https://github.com/Lord-Haji)[@Lord-Haji](https://github.com/Lord-Haji) [![](https://avatars.githubusercontent.com/u/85796?u=d66bb48107582804e6665cd33540cce5dea2fd8b&v=4)](https://github.com/woodworker)[@woodworker](https://github.com/woodworker) [![](https://avatars.githubusercontent.com/u/32632186?u=3e1b1b0d8cc37c998508e3ab83dc20ef1e2f57e0&v=4)](https://github.com/philschmid)[@philschmid](https://github.com/philschmid) [![](https://avatars.githubusercontent.com/u/13198452?v=4)](https://github.com/ChrKahl)[@ChrKahl](https://github.com/ChrKahl) [![](https://avatars.githubusercontent.com/u/8433665?u=1c39439298436f2acaa30c21863e02d3ba13af02&v=4)](https://github.com/bongsang)[@bongsang](https://github.com/bongsang) 
[![](https://avatars.githubusercontent.com/u/49571870?v=4)](https://github.com/clwillhuang)[@clwillhuang](https://github.com/clwillhuang) [![](https://avatars.githubusercontent.com/u/3122709?u=55c1160c7f870bcc582d2e0be42d5b1054262e04&v=4)](https://github.com/BidhanRoy)[@BidhanRoy](https://github.com/BidhanRoy) [![](https://avatars.githubusercontent.com/u/108248080?v=4)](https://github.com/proximal-phalanx)[@proximal-phalanx](https://github.com/proximal-phalanx) [![](https://avatars.githubusercontent.com/u/26385522?v=4)](https://github.com/hiigao)[@hiigao](https://github.com/hiigao) [![](https://avatars.githubusercontent.com/u/152659506?v=4)](https://github.com/samkhano1)[@samkhano1](https://github.com/samkhano1) [![](https://avatars.githubusercontent.com/u/45119610?u=27b4bbe257e0cc055c70f05dc6f45e95d5b09d08&v=4)](https://github.com/ireneisdoomed)[@ireneisdoomed](https://github.com/ireneisdoomed) [![](https://avatars.githubusercontent.com/u/12946725?u=42a21426742352cfbc210619eed7e76bc1bb5b22&v=4)](https://github.com/mahaddad)[@mahaddad](https://github.com/mahaddad) [![](https://avatars.githubusercontent.com/u/8368470?u=1b7aebda11db89d56b90ff89f9b108e3cd8bffe5&v=4)](https://github.com/thehapyone)[@thehapyone](https://github.com/thehapyone) [![](https://avatars.githubusercontent.com/u/18024571?u=c0e12c9590b7e0838b4ab96544bc875e08db0729&v=4)](https://github.com/tomhamer)[@tomhamer](https://github.com/tomhamer) [![](https://avatars.githubusercontent.com/u/1282617?u=940c2e3a241c82af68edc6adf81bc5da0fef0bbe&v=4)](https://github.com/haoch)[@haoch](https://github.com/haoch) [![](https://avatars.githubusercontent.com/u/32279503?u=b760deecdb05c098c0e4e19944b72bc22c6487dc&v=4)](https://github.com/SlapDrone)[@SlapDrone](https://github.com/SlapDrone) [![](https://avatars.githubusercontent.com/u/4302268?u=69a5af6602ab4faa803dcf60b2c50ed33cf44d89&v=4)](https://github.com/taranjeet)[@taranjeet](https://github.com/taranjeet) [![](https://avatars.githubusercontent.com/u/7312176?u=d986a46c4971c5d15feea254801efc5deb0bc358&v=4)](https://github.com/Pixeladed)[@Pixeladed](https://github.com/Pixeladed) [![](https://avatars.githubusercontent.com/u/8475708?v=4)](https://github.com/mlot)[@mlot](https://github.com/mlot) [![](https://avatars.githubusercontent.com/u/7282984?u=5e843c8eca6ff699d7a9e8b73f63b3f6dadcce04&v=4)](https://github.com/JGalego)[@JGalego](https://github.com/JGalego) [![](https://avatars.githubusercontent.com/u/21073184?u=deed6fe562ed425be66c210398811b664b5039a2&v=4)](https://github.com/xieqihui)[@xieqihui](https://github.com/xieqihui) [![](https://avatars.githubusercontent.com/u/9324867?v=4)](https://github.com/mhavey)[@mhavey](https://github.com/mhavey) [![](https://avatars.githubusercontent.com/u/4526224?u=3a47513ee686870ddcbecaa70756e3e8224732af&v=4)](https://github.com/praveenv)[@praveenv](https://github.com/praveenv) [![](https://avatars.githubusercontent.com/u/1734012?u=105d7344bcd5c0dee1a293d2740cefa05cc46b9b&v=4)](https://github.com/srics)[@srics](https://github.com/srics) [![](https://avatars.githubusercontent.com/u/31218485?u=6ce575b365c0353b5b3d1ea03088f8da36764100&v=4)](https://github.com/16BitNarwhal)[@16BitNarwhal](https://github.com/16BitNarwhal) [![](https://avatars.githubusercontent.com/u/12967560?v=4)](https://github.com/zhangch9)[@zhangch9](https://github.com/zhangch9) [![](https://avatars.githubusercontent.com/u/37284051?u=6a4bc9b65700fc4835aebec6bf6aab77acdaa233&v=4)](https://github.com/paulonasc)[@paulonasc](https://github.com/paulonasc) 
[![](https://avatars.githubusercontent.com/u/2008740?u=4c8824a259e14e56c2d3501e32a3422b258704c5&v=4)](https://github.com/rubell)[@rubell](https://github.com/rubell) [![](https://avatars.githubusercontent.com/u/37992436?u=21693d9e841c3b7f9f091a210fbeee7e415a0751&v=4)](https://github.com/izzymsft)[@izzymsft](https://github.com/izzymsft) [![](https://avatars.githubusercontent.com/u/22676399?u=6b46c5acfe16b722badbfa6845516c1627171bbe&v=4)](https://github.com/richarda23)[@richarda23](https://github.com/richarda23) [![](https://avatars.githubusercontent.com/u/7711036?v=4)](https://github.com/zifeiq)[@zifeiq](https://github.com/zifeiq) [![](https://avatars.githubusercontent.com/u/56812134?v=4)](https://github.com/liuyonghengheng)[@liuyonghengheng](https://github.com/liuyonghengheng) [![](https://avatars.githubusercontent.com/u/18428646?u=d26db3c0411bd1d62c1dca99e5c86dd1f7a3b53d&v=4)](https://github.com/tomaspiaggio)[@tomaspiaggio](https://github.com/tomaspiaggio) [![](https://avatars.githubusercontent.com/u/71321890?u=71a53f3a743fb8a91733e2a4cfcc05e309e3ef87&v=4)](https://github.com/klaus-xiong)[@klaus-xiong](https://github.com/klaus-xiong) [![](https://avatars.githubusercontent.com/u/16155041?u=bf86e1dd4aaeccde8ccf12bf8c16c494644b84e1&v=4)](https://github.com/alallema)[@alallema](https://github.com/alallema) [![](https://avatars.githubusercontent.com/u/8777479?v=4)](https://github.com/fengjial)[@fengjial](https://github.com/fengjial) [![](https://avatars.githubusercontent.com/u/18065113?u=6ea1812de26ecb108c18e50b719a109049d93ce2&v=4)](https://github.com/simon824)[@simon824](https://github.com/simon824) [![](https://avatars.githubusercontent.com/u/28787976?u=07c76df6dce5d38c056fb0783128844e6c70f4c4&v=4)](https://github.com/AksAman)[@AksAman](https://github.com/AksAman) [![](https://avatars.githubusercontent.com/u/14037726?u=e91cfcdb7606db58b059893368f3cf70a2340f5f&v=4)](https://github.com/mewim)[@mewim](https://github.com/mewim) [![](https://avatars.githubusercontent.com/u/4874?v=4)](https://github.com/ruanwz)[@ruanwz](https://github.com/ruanwz) [![](https://avatars.githubusercontent.com/u/1921353?v=4)](https://github.com/gdedrouas)[@gdedrouas](https://github.com/gdedrouas) [![](https://avatars.githubusercontent.com/u/1917451?u=f0d78c43c1f2d4bed080f9a8c46905d3c22a28c7&v=4)](https://github.com/mariokostelac)[@mariokostelac](https://github.com/mariokostelac) [![](https://avatars.githubusercontent.com/u/6432132?v=4)](https://github.com/samnoyes)[@samnoyes](https://github.com/samnoyes) [![](https://avatars.githubusercontent.com/u/22236370?u=289c19bfc89a43a7e0c6956f73305aab3a8bd978&v=4)](https://github.com/mosheber)[@mosheber](https://github.com/mosheber) [![](https://avatars.githubusercontent.com/u/8844262?u=1f09d2fe41756368730c3684fc819fbad940b4ac&v=4)](https://github.com/laplaceon)[@laplaceon](https://github.com/laplaceon) [![](https://avatars.githubusercontent.com/u/11781950?u=a34a78ac4d9dcc25fd084f423566c9443c2cc47d&v=4)](https://github.com/thepycoder)[@thepycoder](https://github.com/thepycoder) [![](https://avatars.githubusercontent.com/u/42592581?v=4)](https://github.com/toddkim95)[@toddkim95](https://github.com/toddkim95) [![](https://avatars.githubusercontent.com/u/950938?u=5283ce0f42f555abe0cd3eb9e45d23206c2ba6b8&v=4)](https://github.com/agamble)[@agamble](https://github.com/agamble) [![](https://avatars.githubusercontent.com/u/13607221?u=dcea34602eda8e96ea684d231bd5b597ba0c1a4f&v=4)](https://github.com/KastanDay)[@KastanDay](https://github.com/KastanDay) 
[![](https://avatars.githubusercontent.com/u/931697?u=4ce45d183c52828da0b4f0ca298d67ad970d43f6&v=4)](https://github.com/seanaedmiston)[@seanaedmiston](https://github.com/seanaedmiston) [![](https://avatars.githubusercontent.com/u/3028543?u=5096311a70425e82c9b1a143d29ccd502c155a7f&v=4)](https://github.com/Randl)[@Randl](https://github.com/Randl) [![](https://avatars.githubusercontent.com/u/115017354?v=4)](https://github.com/NikolaosPapailiou)[@NikolaosPapailiou](https://github.com/NikolaosPapailiou) [![](https://avatars.githubusercontent.com/u/460966?v=4)](https://github.com/ebrehault)[@ebrehault](https://github.com/ebrehault) [![](https://avatars.githubusercontent.com/u/32112894?u=d317c16ef9614adbeb3cf18ac39239c585db2264&v=4)](https://github.com/santiagxf)[@santiagxf](https://github.com/santiagxf) [![](https://avatars.githubusercontent.com/u/30162978?v=4)](https://github.com/thehappydinoa)[@thehappydinoa](https://github.com/thehappydinoa) [![](https://avatars.githubusercontent.com/u/30344258?u=51c169c8996024b68e9b3ec0bfe93465940dc8b4&v=4)](https://github.com/LMC117)[@LMC117](https://github.com/LMC117) [![](https://avatars.githubusercontent.com/u/131612909?v=4)](https://github.com/WilliamEspegren)[@WilliamEspegren](https://github.com/WilliamEspegren) [![](https://avatars.githubusercontent.com/u/7380988?u=ba9beadb7fd3bcd6d8439154bedbd32d5fdbd4d8&v=4)](https://github.com/sunbc0120)[@sunbc0120](https://github.com/sunbc0120) [![](https://avatars.githubusercontent.com/u/18614423?u=1d3dba8e4e87d2a449cc90c204f422327af2d09d&v=4)](https://github.com/Simon-Stone)[@Simon-Stone](https://github.com/Simon-Stone) [![](https://avatars.githubusercontent.com/u/15304273?u=7588e8d8f8a889950b0afd00c2457ec3126ce8f6&v=4)](https://github.com/Amyh102)[@Amyh102](https://github.com/Amyh102) [![](https://avatars.githubusercontent.com/u/67831673?v=4)](https://github.com/shumway743)[@shumway743](https://github.com/shumway743) [![](https://avatars.githubusercontent.com/u/12097018?u=ef0ff38c5959d7e7acf2c87e8e8051ca2d047c76&v=4)](https://github.com/gcheron)[@gcheron](https://github.com/gcheron) [![](https://avatars.githubusercontent.com/u/7102288?u=52db4849a0136c1d78cbc5a5de99ee0073384300&v=4)](https://github.com/zachdj)[@zachdj](https://github.com/zachdj) [![](https://avatars.githubusercontent.com/u/6980212?u=89202482380b379837fd7318dde75a00e83d2459&v=4)](https://github.com/ehsanmok)[@ehsanmok](https://github.com/ehsanmok) [![](https://avatars.githubusercontent.com/u/16619882?u=ed851c7ccfa20588d3cd5ca47e79d94c3e4b6427&v=4)](https://github.com/Trevato)[@Trevato](https://github.com/Trevato) [![](https://avatars.githubusercontent.com/u/13738772?u=1685c6916759c2ec986434af557343f6b29bce32&v=4)](https://github.com/raoufchebri)[@raoufchebri](https://github.com/raoufchebri) [![](https://avatars.githubusercontent.com/u/492616?u=c2ecf6dac54322df081577f6b8e1ca390535c4a6&v=4)](https://github.com/delgermurun)[@delgermurun](https://github.com/delgermurun) [![](https://avatars.githubusercontent.com/u/9665243?u=e403da70029d61dbbb9a2f0e03daebc5418974ed&v=4)](https://github.com/jcjc712)[@jcjc712](https://github.com/jcjc712) [![](https://avatars.githubusercontent.com/u/9089568?u=d2f8bc466003afc3558a96f3266a0e32d5c18c34&v=4)](https://github.com/EvilFreelancer)[@EvilFreelancer](https://github.com/EvilFreelancer) [![](https://avatars.githubusercontent.com/u/32046231?u=db454b8e6da48120d78d3397006928cc86f01019&v=4)](https://github.com/zywilliamli)[@zywilliamli](https://github.com/zywilliamli) 
[![](https://avatars.githubusercontent.com/u/48098520?u=aa4a7287f484eb32d408360ca340c2f5bc8444d0&v=4)](https://github.com/thaiminhpv)[@thaiminhpv](https://github.com/thaiminhpv) [![](https://avatars.githubusercontent.com/u/8139170?u=a63f55e62ad26febcd94e193c22bfd867d022af2&v=4)](https://github.com/paperMoose)[@paperMoose](https://github.com/paperMoose) [![](https://avatars.githubusercontent.com/u/71520361?v=4)](https://github.com/younis-bash)[@younis-bash](https://github.com/younis-bash) [![](https://avatars.githubusercontent.com/u/16340036?v=4)](https://github.com/rajib76)[@rajib76](https://github.com/rajib76) [![](https://avatars.githubusercontent.com/u/11153261?u=a5af26e0bd60a27ba4aba60d15b129fc410fe8cc&v=4)](https://github.com/ihpolash)[@ihpolash](https://github.com/ihpolash) [![](https://avatars.githubusercontent.com/u/123224380?v=4)](https://github.com/scadEfUr)[@scadEfUr](https://github.com/scadEfUr) [![](https://avatars.githubusercontent.com/u/51324450?u=25a4838c93e6237e3b6d6ea1fbd23442cfba5723&v=4)](https://github.com/SauhaardW)[@SauhaardW](https://github.com/SauhaardW) [![](https://avatars.githubusercontent.com/u/119924780?v=4)](https://github.com/pranava-amzn)[@pranava-amzn](https://github.com/pranava-amzn) [![](https://avatars.githubusercontent.com/u/16321871?u=9342b5e86b1e6c257e4024bed7e285470f466b8c&v=4)](https://github.com/fynnfluegge)[@fynnfluegge](https://github.com/fynnfluegge) [![](https://avatars.githubusercontent.com/u/2469198?u=43a8a9e376a5a7db6972e720906fd6f66560d235&v=4)](https://github.com/adilansari)[@adilansari](https://github.com/adilansari) [![](https://avatars.githubusercontent.com/u/13305222?u=6d00fe3cfd2414a9e309540fe49f532fc0e503dd&v=4)](https://github.com/bstadt)[@bstadt](https://github.com/bstadt) [![](https://avatars.githubusercontent.com/in/29110?v=4)](https://github.com/apps/dependabot)[@dependabot](https://github.com/apps/dependabot) [![](https://avatars.githubusercontent.com/u/42089598?v=4)](https://github.com/PenghuiCheng)[@PenghuiCheng](https://github.com/PenghuiCheng) [![](https://avatars.githubusercontent.com/u/145396613?u=f0da33ee8d74a5353a43f8df3332c9cac2bd70f8&v=4)](https://github.com/giannis2two)[@giannis2two](https://github.com/giannis2two) [![](https://avatars.githubusercontent.com/u/107621925?u=4a7b06f4c0cac2534521698383f58331c00c093f&v=4)](https://github.com/anilaltuner)[@anilaltuner](https://github.com/anilaltuner) [![](https://avatars.githubusercontent.com/u/144132509?u=42f5528898e3f4e3790bf432b8ca662dc347c778&v=4)](https://github.com/bu2kx)[@bu2kx](https://github.com/bu2kx) [![](https://avatars.githubusercontent.com/u/32715913?u=5de749a141259c3fdd8a16c6438aff2b7823fd69&v=4)](https://github.com/AmineDjeghri)[@AmineDjeghri](https://github.com/AmineDjeghri) [![](https://avatars.githubusercontent.com/u/1918816?v=4)](https://github.com/bakebrain)[@bakebrain](https://github.com/bakebrain) [![](https://avatars.githubusercontent.com/u/5349024?u=4875b6589899edb51cb083d209bd9fbfac58da18&v=4)](https://github.com/bburgin)[@bburgin](https://github.com/bburgin) [![](https://avatars.githubusercontent.com/u/2806769?u=2969d39e1099584bc34b9e91a718f97107b38cbc&v=4)](https://github.com/sreiswig)[@sreiswig](https://github.com/sreiswig) [![](https://avatars.githubusercontent.com/u/134934501?u=167199ff0bff447057fc5e291be0225ad5260111&v=4)](https://github.com/vrushankportkey)[@vrushankportkey](https://github.com/vrushankportkey) 
[![](https://avatars.githubusercontent.com/u/4852235?u=69b6d23a20085d57e304196e304cfd06f3393f3d&v=4)](https://github.com/jxnl)[@jxnl](https://github.com/jxnl) [![](https://avatars.githubusercontent.com/u/8412519?u=391d663c51163f604c14bc625f4d6c11042a0c36&v=4)](https://github.com/arron2003)[@arron2003](https://github.com/arron2003) [![](https://avatars.githubusercontent.com/u/17466553?u=2510816fc74e11bb543f54f97afe1c78e9bda720&v=4)](https://github.com/HashemAlsaket)[@HashemAlsaket](https://github.com/HashemAlsaket) [![](https://avatars.githubusercontent.com/u/1555858?v=4)](https://github.com/prakul)[@prakul](https://github.com/prakul) [![](https://avatars.githubusercontent.com/u/20924562?u=3f61dc32f82124727d7157c0977240770ab82c02&v=4)](https://github.com/ea-open-source)[@ea-open-source](https://github.com/ea-open-source) [![](https://avatars.githubusercontent.com/u/1473079?v=4)](https://github.com/constantinmusca)[@constantinmusca](https://github.com/constantinmusca) [![](https://avatars.githubusercontent.com/u/74497693?u=0d49e69abc1f1c5299d479d943285fcac7eee1ae&v=4)](https://github.com/Subsegment)[@Subsegment](https://github.com/Subsegment) [![](https://avatars.githubusercontent.com/u/15026857?u=a5129b6393cb746e25fca20655458d248ec4f05d&v=4)](https://github.com/zrcni)[@zrcni](https://github.com/zrcni) [![](https://avatars.githubusercontent.com/u/191493?u=3e803364d95e760cafa108ab29ee109ba0e0af83&v=4)](https://github.com/piizei)[@piizei](https://github.com/piizei) [![](https://avatars.githubusercontent.com/u/58871401?u=81f900fd6c286d9e8c5c8673f68b88387ed491e5&v=4)](https://github.com/RohanDey02)[@RohanDey02](https://github.com/RohanDey02) [![](https://avatars.githubusercontent.com/u/57868915?v=4)](https://github.com/SuperJokerayo)[@SuperJokerayo](https://github.com/SuperJokerayo) [![](https://avatars.githubusercontent.com/u/14224983?u=2a696ae181971f12ace4f252b759e1ca75ccdb44&v=4)](https://github.com/demjened)[@demjened](https://github.com/demjened) [![](https://avatars.githubusercontent.com/u/3285355?u=8f91986cb97c2efcd84d62e339d8be43562de13d&v=4)](https://github.com/killinsun)[@killinsun](https://github.com/killinsun) [![](https://avatars.githubusercontent.com/u/291370?u=5802ab31e0feb7ae15465dedaa48ba646f0a4127&v=4)](https://github.com/sanzgiri)[@sanzgiri](https://github.com/sanzgiri) [![](https://avatars.githubusercontent.com/u/1621509?u=e54d671ddef5ac7580003427246fc2247964c9ed&v=4)](https://github.com/MacanPN)[@MacanPN](https://github.com/MacanPN) [![](https://avatars.githubusercontent.com/u/6872942?v=4)](https://github.com/wlleiiwang)[@wlleiiwang](https://github.com/wlleiiwang) [![](https://avatars.githubusercontent.com/u/20760062?u=422c372863e9c42406db2241e41cc52c522431ef&v=4)](https://github.com/abdalrohman)[@abdalrohman](https://github.com/abdalrohman) [![](https://avatars.githubusercontent.com/u/3118964?u=471d785af68097fa9edeaa7bcd130b56ddda6338&v=4)](https://github.com/coyotespike)[@coyotespike](https://github.com/coyotespike) [![](https://avatars.githubusercontent.com/u/1039756?u=1e32f3165c823547362784b17f65f7690b56e0b0&v=4)](https://github.com/zchenyu)[@zchenyu](https://github.com/zchenyu) [![](https://avatars.githubusercontent.com/u/83261447?v=4)](https://github.com/yuwenzho)[@yuwenzho](https://github.com/yuwenzho) [![](https://avatars.githubusercontent.com/u/132831962?u=d91bc0c46bc4c4df36d752076418530eea55a5dc&v=4)](https://github.com/ricki-epsilla)[@ricki-epsilla](https://github.com/ricki-epsilla) 
[![](https://avatars.githubusercontent.com/u/2914618?v=4)](https://github.com/HassanOuda)[@HassanOuda](https://github.com/HassanOuda) [![](https://avatars.githubusercontent.com/u/2215597?u=d5558c7d5c1ab6d4a8e5381826abd1f00371a5be&v=4)](https://github.com/s-udhaya)[@s-udhaya](https://github.com/s-udhaya) [![](https://avatars.githubusercontent.com/u/5522060?v=4)](https://github.com/tesfagabir)[@tesfagabir](https://github.com/tesfagabir) [![](https://avatars.githubusercontent.com/u/56334152?v=4)](https://github.com/chocolate4)[@chocolate4](https://github.com/chocolate4) [![](https://avatars.githubusercontent.com/u/13938372?u=0e3f80aa515c41b7d9084b73d761cad378ebdc7a&v=4)](https://github.com/jasondotparse)[@jasondotparse](https://github.com/jasondotparse) [![](https://avatars.githubusercontent.com/u/12449236?u=f13eba9cfa9baf8fa9a0fce667eb2fe429ecd298&v=4)](https://github.com/bwmatson)[@bwmatson](https://github.com/bwmatson) [![](https://avatars.githubusercontent.com/u/38718601?u=44687611a0b7bd160ee129d04d4220d98f32ebab&v=4)](https://github.com/Daggx)[@Daggx](https://github.com/Daggx) [![](https://avatars.githubusercontent.com/u/78627776?u=7fd9922950b898ab502666f2cea155cf0200fe5f&v=4)](https://github.com/isahers1)[@isahers1](https://github.com/isahers1) [![](https://avatars.githubusercontent.com/u/848849?v=4)](https://github.com/seth-hg)[@seth-hg](https://github.com/seth-hg) [![](https://avatars.githubusercontent.com/u/34580718?u=cf4ff62610ff72ad9580d328e38f32e306d6150f&v=4)](https://github.com/NolanTrem)[@NolanTrem](https://github.com/NolanTrem) [![](https://avatars.githubusercontent.com/u/9007876?v=4)](https://github.com/mpb159753)[@mpb159753](https://github.com/mpb159753) [![](https://avatars.githubusercontent.com/u/800430?v=4)](https://github.com/mikeknoop)[@mikeknoop](https://github.com/mikeknoop) [![](https://avatars.githubusercontent.com/u/57349093?v=4)](https://github.com/datelier)[@datelier](https://github.com/datelier) [![](https://avatars.githubusercontent.com/u/13024750?u=6ae631199ec7c0bb34eb8d56200023cdd94720d3&v=4)](https://github.com/JamsheedMistri)[@JamsheedMistri](https://github.com/JamsheedMistri) [![](https://avatars.githubusercontent.com/u/42374034?u=cfb14ff1a7c4f0a500cd9c282bc3fbcba170daef&v=4)](https://github.com/atherfawaz)[@atherfawaz](https://github.com/atherfawaz) [![](https://avatars.githubusercontent.com/u/6012338?u=198f10817236beac03b10bb8f5cc6d7fcb133cc7&v=4)](https://github.com/Hugoberry)[@Hugoberry](https://github.com/Hugoberry) [![](https://avatars.githubusercontent.com/u/54216004?u=6a387166a0e8599c4f3ff35f61c12458df539f96&v=4)](https://github.com/Haris-Ali007)[@Haris-Ali007](https://github.com/Haris-Ali007) [![](https://avatars.githubusercontent.com/u/52078762?v=4)](https://github.com/AlpinDale)[@AlpinDale](https://github.com/AlpinDale) [![](https://avatars.githubusercontent.com/u/70274018?u=b6d5fd627cd26f590ed442d4dffa5bdddcb803cc&v=4)](https://github.com/jjovalle99)[@jjovalle99](https://github.com/jjovalle99) [![](https://avatars.githubusercontent.com/u/7529846?u=bd1b12fa55583ac7f01c4440cad87163a0fe3c19&v=4)](https://github.com/DN6)[@DN6](https://github.com/DN6) [![](https://avatars.githubusercontent.com/u/83648453?u=8557d590ff3516d093da32689816e898a08245ce&v=4)](https://github.com/spike-spiegel-21)[@spike-spiegel-21](https://github.com/spike-spiegel-21) [![](https://avatars.githubusercontent.com/u/91102080?u=c87d3f88e6b05445a121c204a0d39a0b9ec17e05&v=4)](https://github.com/mziru)[@mziru](https://github.com/mziru) 
[![](https://avatars.githubusercontent.com/u/56706206?v=4)](https://github.com/Dylan20XX)[@Dylan20XX](https://github.com/Dylan20XX) [![](https://avatars.githubusercontent.com/u/8936233?u=07eb2625319cd0fd18df747fcdeef42cd9fc981d&v=4)](https://github.com/xingfanxia)[@xingfanxia](https://github.com/xingfanxia) [![](https://avatars.githubusercontent.com/u/74933942?u=a952add7652d59815f24581d83f504216780521b&v=4)](https://github.com/0xJord4n)[@0xJord4n](https://github.com/0xJord4n) [![](https://avatars.githubusercontent.com/u/29782447?u=a8804de5269d64ef1c2587945e1b40925349c4a0&v=4)](https://github.com/tabbyl21)[@tabbyl21](https://github.com/tabbyl21) [![](https://avatars.githubusercontent.com/u/38180263?u=d514276e558f3f3aaba4844fdeb14eb84e9c8cc2&v=4)](https://github.com/naman-modi)[@naman-modi](https://github.com/naman-modi) [![](https://avatars.githubusercontent.com/u/126395124?u=79cff420daf96b72b14caca0061b57b884139f4f&v=4)](https://github.com/sokolgood)[@sokolgood](https://github.com/sokolgood) [![](https://avatars.githubusercontent.com/u/2310608?u=1e5009aa6681eed766a14cfb8849d820821dddce&v=4)](https://github.com/harelix)[@harelix](https://github.com/harelix) [![](https://avatars.githubusercontent.com/u/107643?v=4)](https://github.com/standby24x7)[@standby24x7](https://github.com/standby24x7) [![](https://avatars.githubusercontent.com/u/37549748?v=4)](https://github.com/lts-rad)[@lts-rad](https://github.com/lts-rad) [![](https://avatars.githubusercontent.com/u/829644?u=56a7fd939b2d15ed21011497db77ad3f569e8a60&v=4)](https://github.com/mengxr)[@mengxr](https://github.com/mengxr) [![](https://avatars.githubusercontent.com/u/9869689?u=b572050134e1e6a3c0096d2b032a5dec32725222&v=4)](https://github.com/nuric)[@nuric](https://github.com/nuric) [![](https://avatars.githubusercontent.com/u/16749003?v=4)](https://github.com/akshaya-a)[@akshaya-a](https://github.com/akshaya-a) [![](https://avatars.githubusercontent.com/u/16641288?u=f659a34367a54ea7ac49bc2a51ac27f4a72c770b&v=4)](https://github.com/edreisMD)[@edreisMD](https://github.com/edreisMD) [![](https://avatars.githubusercontent.com/u/18373802?u=92b9ba56d4178115777a0a1a7d2bf88c162f3fce&v=4)](https://github.com/ar-mccabe)[@ar-mccabe](https://github.com/ar-mccabe) [![](https://avatars.githubusercontent.com/u/98005188?u=21b5e30aa6464f46e85aa006cb44b2bd18c89347&v=4)](https://github.com/Navanit-git)[@Navanit-git](https://github.com/Navanit-git) [![](https://avatars.githubusercontent.com/u/127131037?u=74ffbf6c2a443f51f7e72d00b0a4e9a30b9e1c4c&v=4)](https://github.com/david-huge)[@david-huge](https://github.com/david-huge) [![](https://avatars.githubusercontent.com/u/91344214?u=5c34c21b464a6bbffd83a07aafac2cf9076856db&v=4)](https://github.com/rotemweiss57)[@rotemweiss57](https://github.com/rotemweiss57) [![](https://avatars.githubusercontent.com/u/9272497?u=bde02b58aebeb42b77cd6678456e8ead7f50ab66&v=4)](https://github.com/hmilkovi)[@hmilkovi](https://github.com/hmilkovi) [![](https://avatars.githubusercontent.com/u/42059733?u=502e381ca0e17491298e90ac3c5db019dd484efc&v=4)](https://github.com/vreyespue)[@vreyespue](https://github.com/vreyespue) [![](https://avatars.githubusercontent.com/u/2792?u=f5d3e57d22f60b27f9c87430dc45bceb49e88215&v=4)](https://github.com/deepblue)[@deepblue](https://github.com/deepblue) [![](https://avatars.githubusercontent.com/u/6087484?u=45381a549e19872d386ca7a7bf399dd571f2f3e8&v=4)](https://github.com/niklub)[@niklub](https://github.com/niklub) 
[![](https://avatars.githubusercontent.com/u/1081215?v=4)](https://github.com/dirtysalt)[@dirtysalt](https://github.com/dirtysalt) [![](https://avatars.githubusercontent.com/u/2138258?u=7de291a1ce0c95d6589496ba8e1d056c054ced00&v=4)](https://github.com/zeiler)[@zeiler](https://github.com/zeiler) [![](https://avatars.githubusercontent.com/u/16364994?u=d8603567cb87b4f76f0df2f7937252ae040cbebf&v=4)](https://github.com/sachinparyani)[@sachinparyani](https://github.com/sachinparyani) [![](https://avatars.githubusercontent.com/u/27913091?u=af5f1ab3c8383109dfed085fd2e2aa09599dece8&v=4)](https://github.com/ju-bezdek)[@ju-bezdek](https://github.com/ju-bezdek) [![](https://avatars.githubusercontent.com/u/108557828?u=1f1cc6b7e04613034c6ee4add7846c5a7333da26&v=4)](https://github.com/ColabDog)[@ColabDog](https://github.com/ColabDog) [![](https://avatars.githubusercontent.com/u/37485638?u=2552fdd04d05df363fa34b99c3cd3392762bf626&v=4)](https://github.com/hanit-com)[@hanit-com](https://github.com/hanit-com) [![](https://avatars.githubusercontent.com/u/2748495?v=4)](https://github.com/manmax31)[@manmax31](https://github.com/manmax31) [![](https://avatars.githubusercontent.com/u/38863?v=4)](https://github.com/imrehg)[@imrehg](https://github.com/imrehg) [![](https://avatars.githubusercontent.com/u/1454551?u=14928571307ed348c362e902edc913f6d81fea07&v=4)](https://github.com/janchorowski)[@janchorowski](https://github.com/janchorowski) [![](https://avatars.githubusercontent.com/u/90774897?v=4)](https://github.com/AthulVincent)[@AthulVincent](https://github.com/AthulVincent) [![](https://avatars.githubusercontent.com/u/23078323?u=7524c4ab19b061e21e62ddd6b48b6084fd6d54c1&v=4)](https://github.com/tamohannes)[@tamohannes](https://github.com/tamohannes) [![](https://avatars.githubusercontent.com/u/49598618?u=2d8024560f2f936312e819348cc18db338961fb7&v=4)](https://github.com/boazwasserman)[@boazwasserman](https://github.com/boazwasserman) [![](https://avatars.githubusercontent.com/u/30856?v=4)](https://github.com/dsummersl)[@dsummersl](https://github.com/dsummersl) [![](https://avatars.githubusercontent.com/u/280981?u=6c969bb88d84ac2c2ea100389504f63ac9155425&v=4)](https://github.com/idvorkin)[@idvorkin](https://github.com/idvorkin) [![](https://avatars.githubusercontent.com/u/24319338?v=4)](https://github.com/vempaliakhil96)[@vempaliakhil96](https://github.com/vempaliakhil96) [![](https://avatars.githubusercontent.com/u/18140070?u=1992cdb13c62ee66f4ccc8f000d2c6efae3056c3&v=4)](https://github.com/C-K-Loan)[@C-K-Loan](https://github.com/C-K-Loan) [![](https://avatars.githubusercontent.com/u/18020640?u=d47ad1cc8fb82340d1c77d1f191038372987f85a&v=4)](https://github.com/daniel-brenot)[@daniel-brenot](https://github.com/daniel-brenot) [![](https://avatars.githubusercontent.com/u/20795854?u=e0a8116151662cf0126b274f74fd279f34febf93&v=4)](https://github.com/jwbeck97)[@jwbeck97](https://github.com/jwbeck97) We're so thankful for your support! And one more thank you to [@tiangolo](https://github.com/tiangolo) for inspiration via FastAPI's [excellent people page](https://fastapi.tiangolo.com/fastapi-people). [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/people.mdx) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E).
https://python.langchain.com/v0.2/docs/how_to/llm_caching/
How to cache LLM responses
==========================

LangChain provides an optional caching layer for LLMs. This is useful for two reasons:

* It can save you money by reducing the number of API calls you make to the LLM provider, if you're often requesting the same completion multiple times.
* It can speed up your application by reducing the number of API calls you make to the LLM provider.

```python
from langchain.globals import set_llm_cache
from langchain_openai import OpenAI

# To make the caching really obvious, let's use a slower model.
llm = OpenAI(model_name="gpt-3.5-turbo-instruct", n=2, best_of=2)
```

**API Reference:** [set_llm_cache](https://api.python.langchain.com/en/latest/globals/langchain.globals.set_llm_cache.html) | [OpenAI](https://api.python.langchain.com/en/latest/llms/langchain_openai.llms.base.OpenAI.html)

```python
%%time
from langchain.cache import InMemoryCache

set_llm_cache(InMemoryCache())

# The first time, it is not yet in cache, so it should take longer
llm.predict("Tell me a joke")
```

**API Reference:** [InMemoryCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.InMemoryCache.html)

```output
CPU times: user 13.7 ms, sys: 6.54 ms, total: 20.2 ms
Wall time: 330 ms

"\n\nWhy couldn't the bicycle stand up by itself? Because it was two-tired!"
```

```python
%%time
# The second time it is, so it goes faster
llm.predict("Tell me a joke")
```

```output
CPU times: user 436 µs, sys: 921 µs, total: 1.36 ms
Wall time: 1.36 ms

"\n\nWhy couldn't the bicycle stand up by itself? Because it was two-tired!"
```

SQLite Cache
------------

```python
!rm .langchain.db
```

```python
# We can do the same thing with a SQLite cache
from langchain_community.cache import SQLiteCache

set_llm_cache(SQLiteCache(database_path=".langchain.db"))
```

**API Reference:** [SQLiteCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.SQLiteCache.html)

```python
%%time
# The first time, it is not yet in cache, so it should take longer
llm.predict("Tell me a joke")
```

```output
CPU times: user 29.3 ms, sys: 17.3 ms, total: 46.7 ms
Wall time: 364 ms

'\n\nWhy did the tomato turn red?\n\nBecause it saw the salad dressing!'
```

```python
%%time
# The second time it is, so it goes faster
llm.predict("Tell me a joke")
```

```output
CPU times: user 4.58 ms, sys: 2.23 ms, total: 6.8 ms
Wall time: 4.68 ms

'\n\nWhy did the tomato turn red?\n\nBecause it saw the salad dressing!'
```
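The `%%time` magics above only run inside a notebook. As a rough standalone sketch of the same cache-hit speedup (an illustration, not part of the original guide; it assumes `OPENAI_API_KEY` is set in the environment and reuses the model above), you can time the two calls directly:

```python
import time

from langchain.globals import set_llm_cache
from langchain_community.cache import InMemoryCache
from langchain_openai import OpenAI

# In-memory cache; swap in SQLiteCache for a persistent one.
set_llm_cache(InMemoryCache())
llm = OpenAI(model_name="gpt-3.5-turbo-instruct", n=2, best_of=2)

for label in ("first call (cache miss)", "second call (cache hit)"):
    start = time.perf_counter()
    llm.invoke("Tell me a joke")
    print(f"{label}: {time.perf_counter() - start:.3f}s")
```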
https://python.langchain.com/v0.2/docs/how_to/logprobs/
How to get log probabilities
============================

Prerequisites

This guide assumes familiarity with the following concepts:

* [Chat models](/v0.2/docs/concepts/#chat-models)

Certain chat models can be configured to return token-level log probabilities representing the likelihood of a given token. This guide walks through how to get this information in LangChain.

OpenAI
------

Install the LangChain x OpenAI package and set your API key:

```python
%pip install -qU langchain-openai
```

```python
import getpass
import os

os.environ["OPENAI_API_KEY"] = getpass.getpass()
```

For the OpenAI API to return log probabilities, we need to configure the `logprobs=True` param. Then, the logprobs are included on each output [`AIMessage`](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) as part of the `response_metadata`:

```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-3.5-turbo-0125").bind(logprobs=True)
msg = llm.invoke(("human", "how are you today"))
msg.response_metadata["logprobs"]["content"][:5]
```

**API Reference:** [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)

```output
[{'token': 'I', 'bytes': [73], 'logprob': -0.26341408, 'top_logprobs': []},
 {'token': "'m", 'bytes': [39, 109], 'logprob': -0.48584133, 'top_logprobs': []},
 {'token': ' just', 'bytes': [32, 106, 117, 115, 116], 'logprob': -0.23484154, 'top_logprobs': []},
 {'token': ' a', 'bytes': [32, 97], 'logprob': -0.0018291725, 'top_logprobs': []},
 {'token': ' computer', 'bytes': [32, 99, 111, 109, 112, 117, 116, 101, 114], 'logprob': -0.052299336, 'top_logprobs': []}]
```

And they are part of streamed message chunks as well:

```python
ct = 0
full = None
for chunk in llm.stream(("human", "how are you today")):
    if ct < 5:
        full = chunk if full is None else full + chunk
        if "logprobs" in full.response_metadata:
            print(full.response_metadata["logprobs"]["content"])
    else:
        break
    ct += 1
```

```output
[]
[{'token': 'I', 'bytes': [73], 'logprob': -0.26593843, 'top_logprobs': []}]
[{'token': 'I', 'bytes': [73], 'logprob': -0.26593843, 'top_logprobs': []}, {'token': "'m", 'bytes': [39, 109], 'logprob': -0.3238896, 'top_logprobs': []}]
[{'token': 'I', 'bytes': [73], 'logprob': -0.26593843, 'top_logprobs': []}, {'token': "'m", 'bytes': [39, 109], 'logprob': -0.3238896, 'top_logprobs': []}, {'token': ' just', 'bytes': [32, 106, 117, 115, 116], 'logprob': -0.23778509, 'top_logprobs': []}]
[{'token': 'I', 'bytes': [73], 'logprob': -0.26593843, 'top_logprobs': []}, {'token': "'m", 'bytes': [39, 109], 'logprob': -0.3238896, 'top_logprobs': []}, {'token': ' just', 'bytes': [32, 106, 117, 115, 116], 'logprob': -0.23778509, 'top_logprobs': []}, {'token': ' a', 'bytes': [32, 97], 'logprob': -0.0022134194, 'top_logprobs': []}]
```

Next steps
----------

You've now learned how to get logprobs from OpenAI models in LangChain. Next, check out the other how-to guides on chat models in this section, like [how to get a model to return structured output](/v0.2/docs/how_to/structured_output/) or [how to track token usage](/v0.2/docs/how_to/chat_token_usage_tracking/).
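As a small worked example of using these values (a sketch that is not part of the original guide; it assumes the same `logprobs=True` setup and API key as above), you can aggregate the per-token logprobs into a sequence log-probability and a perplexity:

```python
import math

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-3.5-turbo-0125").bind(logprobs=True)
msg = llm.invoke(("human", "how are you today"))

# Each entry has the shape shown above: {'token': ..., 'logprob': ..., ...}
logprobs = [t["logprob"] for t in msg.response_metadata["logprobs"]["content"]]

total_logprob = sum(logprobs)  # log P(completion | prompt)
perplexity = math.exp(-total_logprob / len(logprobs))
print(f"{len(logprobs)} tokens, total logprob {total_logprob:.3f}, perplexity {perplexity:.2f}")
```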
https://python.langchain.com/v0.2/docs/how_to/local_llms/
Run LLMs locally
================

Use case
--------

The popularity of projects like [PrivateGPT](https://github.com/imartinez/privateGPT), [llama.cpp](https://github.com/ggerganov/llama.cpp), [Ollama](https://github.com/ollama/ollama), [GPT4All](https://github.com/nomic-ai/gpt4all), [llamafile](https://github.com/Mozilla-Ocho/llamafile), and others underscore the demand to run LLMs locally (on your own device). This has at least two important benefits:

1. `Privacy`: Your data is not sent to a third party, and it is not subject to the terms of service of a commercial service
2. `Cost`: There is no inference fee, which is important for token-intensive applications (e.g., [long-running simulations](https://twitter.com/RLanceMartin/status/1691097659262820352?s=20), summarization)

Overview
--------

Running an LLM locally requires a few things:

1. `Open-source LLM`: An open-source LLM that can be freely modified and shared
2. `Inference`: Ability to run this LLM on your device with acceptable latency

### Open-source LLMs

Users can now gain access to a rapidly growing set of [open-source LLMs](https://cameronrwolfe.substack.com/p/the-history-of-open-source-llms-better). These LLMs can be assessed across at least two dimensions (see figure):

1. `Base model`: What is the base model and how was it trained?
2. `Fine-tuning approach`: Was the base model fine-tuned and, if so, what [set of instructions](https://cameronrwolfe.substack.com/p/beyond-llama-the-power-of-open-llms#%C2%A7alpaca-an-instruction-following-llama-model) was used?

![Image description](/v0.2/assets/images/OSS_LLM_overview-9444c9793c76bd4785a5b0cd020c14ef.png)

The relative performance of these models can be assessed using several leaderboards, including:

1. [LmSys](https://chat.lmsys.org/?arena)
2. [GPT4All](https://gpt4all.io/index.html)
3. [HuggingFace](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard)

### Inference

A few frameworks have emerged to support inference of open-source LLMs on various devices:

1. [`llama.cpp`](https://github.com/ggerganov/llama.cpp): C++ implementation of llama inference code with [weight optimization / quantization](https://finbarr.ca/how-is-llama-cpp-possible/)
2. [`gpt4all`](https://docs.gpt4all.io/index.html): Optimized C backend for inference
3. [`Ollama`](https://ollama.ai/): Bundles model weights and environment into an app that runs on device and serves the LLM
4. [`llamafile`](https://github.com/Mozilla-Ocho/llamafile): Bundles model weights and everything needed to run the model in a single file, allowing you to run the LLM locally from this file without any additional installation steps

In general, these frameworks will do a few things:

1. `Quantization`: Reduce the memory footprint of the raw model weights
2. `Efficient implementation for inference`: Support inference on consumer hardware (e.g., CPU or laptop GPU)

In particular, see [this excellent post](https://finbarr.ca/how-is-llama-cpp-possible/) on the importance of quantization.

![Image description](/v0.2/assets/images/llama-memory-weights-aaccef5df087e993b0f46277500039b6.png)

With less precision, we radically decrease the memory needed to store the LLM in memory.
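To make the effect of quantization concrete, here is a rough back-of-the-envelope calculation (an illustrative sketch, not from the original guide; actual memory use also includes the KV cache and runtime overhead):

```python
# Approximate storage needed just for the weights of a 7B-parameter model.
params = 7e9

for precision, bytes_per_param in [("fp32", 4), ("fp16", 2), ("int8", 1), ("4-bit", 0.5)]:
    gigabytes = params * bytes_per_param / 1e9
    print(f"{precision}: ~{gigabytes:.1f} GB")

# fp32: ~28.0 GB, fp16: ~14.0 GB, int8: ~7.0 GB, 4-bit: ~3.5 GB
```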
In addition, [this sheet](https://docs.google.com/spreadsheets/d/1OehfHHNSn66BP2h3Bxp2NJTVX97icU0GmCXF6pK23H8/edit#gid=0) shows the importance of GPU memory bandwidth: a Mac M2 Max is 5-6x faster than an M1 for inference due to its larger GPU memory bandwidth.

![Image description](/v0.2/assets/images/llama_t_put-c6f0ea201a6dd508999170325cd6804a.png)

Quickstart
----------

[`Ollama`](https://ollama.ai/) is one way to easily run inference on macOS. The instructions [here](https://github.com/jmorganca/ollama?tab=readme-ov-file#ollama) provide details, which we summarize:

* [Download and run](https://ollama.ai/download) the app
* From the command line, fetch a model from this [list of options](https://github.com/jmorganca/ollama): e.g., `ollama pull llama2`
* When the app is running, all models are automatically served on `localhost:11434`

```python
from langchain_community.llms import Ollama

llm = Ollama(model="llama2")
llm.invoke("The first man on the moon was ...")
```

**API Reference:** [Ollama](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.ollama.Ollama.html)

```output
' The first man on the moon was Neil Armstrong, who landed on the moon on July 20, 1969 as part of the Apollo 11 mission. obviously.'
```

Stream tokens as they are being generated:

```python
from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler

llm = Ollama(
    model="llama2",
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
llm.invoke("The first man on the moon was ...")
```

**API Reference:** [CallbackManager](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.CallbackManager.html) | [StreamingStdOutCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.streaming_stdout.StreamingStdOutCallbackHandler.html)

```output
The first man to walk on the moon was Neil Armstrong, an American astronaut who was part of the Apollo 11 mission in 1969. февруари 20, 1969, Armstrong stepped out of the lunar module Eagle and onto the moon's surface, famously declaring "That's one small step for man, one giant leap for mankind" as he took his first steps. He was followed by fellow astronaut Edwin "Buzz" Aldrin, who also walked on the moon during the mission.

' The first man to walk on the moon was Neil Armstrong, an American astronaut who was part of the Apollo 11 mission in 1969. февруари 20, 1969, Armstrong stepped out of the lunar module Eagle and onto the moon\'s surface, famously declaring "That\'s one small step for man, one giant leap for mankind" as he took his first steps. He was followed by fellow astronaut Edwin "Buzz" Aldrin, who also walked on the moon during the mission.'
```

Environment
-----------

Inference speed is a challenge when running models locally (see above). To minimize latency, it is desirable to run models locally on a GPU, which ships with many consumer laptops (e.g., [Apple devices](https://www.apple.com/newsroom/2022/06/apple-unveils-m2-with-breakthrough-performance-and-capabilities/)). And even with a GPU, the available GPU memory bandwidth (as noted above) is important.

### Running Apple silicon GPU

`Ollama` and [`llamafile`](https://github.com/Mozilla-Ocho/llamafile?tab=readme-ov-file#gpu-support) will automatically utilize the GPU on Apple devices.
Other frameworks require the user to set up the environment to utilize the Apple GPU. For example, `llama.cpp` Python bindings can be configured to use the GPU via [Metal](https://developer.apple.com/metal/). Metal is a graphics and compute API created by Apple providing near-direct access to the GPU.

See the [`llama.cpp`](/v0.2/docs/integrations/llms/llamacpp/) setup [here](https://github.com/abetlen/llama-cpp-python/blob/main/docs/install/macos.md) to enable this. In particular, ensure that conda is using the correct virtual environment that you created (`miniforge3`). E.g., for me:

```bash
conda activate /Users/rlm/miniforge3/envs/llama
```

With the above confirmed, then:

```bash
CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install -U llama-cpp-python --no-cache-dir
```

LLMs
----

There are various ways to gain access to quantized model weights.

1. [`HuggingFace`](https://huggingface.co/TheBloke) - Many quantized models are available for download and can be run with frameworks such as [`llama.cpp`](https://github.com/ggerganov/llama.cpp). You can also download models in [`llamafile` format](https://huggingface.co/models?other=llamafile) from HuggingFace.
2. [`gpt4all`](https://gpt4all.io/index.html) - The model explorer offers a leaderboard of metrics and associated quantized models available for download
3. [`Ollama`](https://github.com/jmorganca/ollama) - Several models can be accessed directly via `pull`

### Ollama

With [Ollama](https://github.com/jmorganca/ollama), fetch a model via `ollama pull <model family>:<tag>`:

* E.g., for Llama-7b: `ollama pull llama2` will download the most basic version of the model (e.g., smallest # parameters and 4 bit quantization)
* We can also specify a particular version from the [model list](https://github.com/jmorganca/ollama?tab=readme-ov-file#model-library), e.g., `ollama pull llama2:13b`
* See the full set of parameters on the [API reference page](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.ollama.Ollama.html)

```python
from langchain_community.llms import Ollama

llm = Ollama(model="llama2:13b")
llm.invoke("The first man on the moon was ... think step by step")
```

**API Reference:** [Ollama](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.ollama.Ollama.html)

```output
' Sure! Here\'s the answer, broken down step by step:\n\nThe first man on the moon was... Neil Armstrong.\n\nHere\'s how I arrived at that answer:\n\n1. The first manned mission to land on the moon was Apollo 11.\n2. The mission included three astronauts: Neil Armstrong, Edwin "Buzz" Aldrin, and Michael Collins.\n3. Neil Armstrong was the mission commander and the first person to set foot on the moon.\n4. On July 20, 1969, Armstrong stepped out of the lunar module Eagle and onto the moon\'s surface, famously declaring "That\'s one small step for man, one giant leap for mankind."\n\nSo, the first man on the moon was Neil Armstrong!'
```

### Llama.cpp

Llama.cpp is compatible with a [broad set of models](https://github.com/ggerganov/llama.cpp). For example, below we run inference on `llama2-13b` with 4 bit quantization downloaded from [HuggingFace](https://huggingface.co/TheBloke/Llama-2-13B-GGML/tree/main).

As noted above, see the [API reference](https://api.python.langchain.com/en/latest/llms/langchain.llms.llamacpp.LlamaCpp.html?highlight=llamacpp#langchain.llms.llamacpp.LlamaCpp) for the full set of parameters.
From the [llama.cpp API reference docs](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.llamacpp.LlamaCpp.html), a few are worth commenting on:

`n_gpu_layers`: number of layers to be loaded into GPU memory

* Value: 1
* Meaning: Only one layer of the model will be loaded into GPU memory (1 is often sufficient).

`n_batch`: number of tokens the model should process in parallel

* Value: n_batch
* Meaning: It's recommended to choose a value between 1 and n_ctx (which in this case is set to 2048)

`n_ctx`: Token context window

* Value: 2048
* Meaning: The model will consider a window of 2048 tokens at a time

`f16_kv`: whether the model should use half-precision for the key/value cache

* Value: True
* Meaning: The model will use half-precision, which can be more memory efficient; Metal only supports True.

```python
%env CMAKE_ARGS="-DLLAMA_METAL=on"
%env FORCE_CMAKE=1
%pip install --upgrade --quiet llama-cpp-python --no-cache-dir
```

```python
from langchain_community.llms import LlamaCpp
from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler

llm = LlamaCpp(
    model_path="/Users/rlm/Desktop/Code/llama.cpp/models/openorca-platypus2-13b.gguf.q4_0.bin",
    n_gpu_layers=1,
    n_batch=512,
    n_ctx=2048,
    f16_kv=True,
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
    verbose=True,
)
```

**API Reference:** [LlamaCpp](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.llamacpp.LlamaCpp.html) | [CallbackManager](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.CallbackManager.html) | [StreamingStdOutCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.streaming_stdout.StreamingStdOutCallbackHandler.html)

The console log will show the following to indicate Metal was enabled properly from the steps above:

```output
ggml_metal_init: allocating
ggml_metal_init: using MPS
```

```python
llm.invoke("The first man on the moon was ... Let's think step by step")
```

```output
Llama.generate: prefix-match hit

 and use logical reasoning to figure out who the first man on the moon was.

Here are some clues:

1. The first man on the moon was an American.
2. He was part of the Apollo 11 mission.
3. He stepped out of the lunar module and became the first person to set foot on the moon's surface.
4. His last name is Armstrong.

Now, let's use our reasoning skills to figure out who the first man on the moon was. Based on clue #1, we know that the first man on the moon was an American. Clue #2 tells us that he was part of the Apollo 11 mission. Clue #3 reveals that he was the first person to set foot on the moon's surface. And finally, clue #4 gives us his last name: Armstrong.

Therefore, the first man on the moon was Neil Armstrong!
```
```output
llama_print_timings: load time = 9623.21 ms
llama_print_timings: sample time = 143.77 ms / 203 runs ( 0.71 ms per token, 1412.01 tokens per second)
llama_print_timings: prompt eval time = 485.94 ms / 7 tokens ( 69.42 ms per token, 14.40 tokens per second)
llama_print_timings: eval time = 6385.16 ms / 202 runs ( 31.61 ms per token, 31.64 tokens per second)
llama_print_timings: total time = 7279.28 ms
```
```output
" and use logical reasoning to figure out who the first man on the moon was.\n\nHere are some clues:\n\n1. The first man on the moon was an American.\n2. He was part of the Apollo 11 mission.\n3. He stepped out of the lunar module and became the first person to set foot on the moon's surface.\n4. His last name is Armstrong.\n\nNow, let's use our reasoning skills to figure out who the first man on the moon was. Based on clue #1, we know that the first man on the moon was an American. Clue #2 tells us that he was part of the Apollo 11 mission. Clue #3 reveals that he was the first person to set foot on the moon's surface. And finally, clue #4 gives us his last name: Armstrong.\nTherefore, the first man on the moon was Neil Armstrong!"
```

### GPT4All

We can use model weights downloaded from the [GPT4All](/v0.2/docs/integrations/llms/gpt4all/) model explorer. Similar to what is shown above, we can run inference and use [the API reference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.gpt4all.GPT4All.html) to set parameters of interest.

```python
%pip install gpt4all
```

```python
from langchain_community.llms import GPT4All

llm = GPT4All(
    model="/Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin"
)
```

**API Reference:** [GPT4All](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.gpt4all.GPT4All.html)

```python
llm.invoke("The first man on the moon was ... Let's think step by step")
```

```output
".\n1) The United States decides to send a manned mission to the moon.2) They choose their best astronauts and train them for this specific mission.3) They build a spacecraft that can take humans to the moon, called the Lunar Module (LM).4) They also create a larger spacecraft, called the Saturn V rocket, which will launch both the LM and the Command Service Module (CSM), which will carry the astronauts into orbit.5) The mission is planned down to the smallest detail: from the trajectory of the rockets to the exact movements of the astronauts during their moon landing.6) On July 16, 1969, the Saturn V rocket launches from Kennedy Space Center in Florida, carrying the Apollo 11 mission crew into space.7) After one and a half orbits around the Earth, the LM separates from the CSM and begins its descent to the moon's surface.8) On July 20, 1969, at 2:56 pm EDT (GMT-4), Neil Armstrong becomes the first man on the moon. He speaks these"
```

### llamafile

One of the simplest ways to run an LLM locally is using a [llamafile](https://github.com/Mozilla-Ocho/llamafile). All you need to do is:

1) Download a llamafile from [HuggingFace](https://huggingface.co/models?other=llamafile)
2) Make the file executable
3) Run the file

llamafiles bundle model weights and a [specially-compiled](https://github.com/Mozilla-Ocho/llamafile?tab=readme-ov-file#technical-details) version of [`llama.cpp`](https://github.com/ggerganov/llama.cpp) into a single file that can run on most computers without any additional dependencies. They also come with an embedded inference server that provides an [API](https://github.com/Mozilla-Ocho/llamafile/blob/main/llama.cpp/server/README.md#api-endpoints) for interacting with your model.

Here's a simple bash script that shows all 3 setup steps:

```bash
# Download a llamafile from HuggingFace
wget https://huggingface.co/jartine/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/TinyLlama-1.1B-Chat-v1.0.Q5_K_M.llamafile

# Make the file executable. On Windows, instead just rename the file to end in ".exe".
chmod +x TinyLlama-1.1B-Chat-v1.0.Q5_K_M.llamafile

# Start the model server. Listens at http://localhost:8080 by default.
./TinyLlama-1.1B-Chat-v1.0.Q5_K_M.llamafile --server --nobrowser
```

After you run the above setup steps, you can use LangChain to interact with your model:

```python
from langchain_community.llms.llamafile import Llamafile

llm = Llamafile()
llm.invoke("The first man on the moon was ... Let's think step by step.")
```

**API Reference:** [Llamafile](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.llamafile.Llamafile.html)

```output
"\nFirstly, let's imagine the scene where Neil Armstrong stepped onto the moon. This happened in 1969. The first man on the moon was Neil Armstrong. We already know that.\n2nd, let's take a step back. Neil Armstrong didn't have any special powers. He had to land his spacecraft safely on the moon without injuring anyone or causing any damage. If he failed to do this, he would have been killed along with all those people who were on board the spacecraft.\n3rd, let's imagine that Neil Armstrong successfully landed his spacecraft on the moon and made it back to Earth safely. The next step was for him to be hailed as a hero by his people back home. It took years before Neil Armstrong became an American hero.\n4th, let's take another step back. Let's imagine that Neil Armstrong wasn't hailed as a hero, and instead, he was just forgotten. This happened in the 1970s. Neil Armstrong wasn't recognized for his remarkable achievement on the moon until after he died.\n5th, let's take another step back. Let's imagine that Neil Armstrong didn't die in the 1970s and instead, lived to be a hundred years old. This happened in 2036. In the year 2036, Neil Armstrong would have been a centenarian.\nNow, let's think about the present. Neil Armstrong is still alive. He turned 95 years old on July 20th, 2018. If he were to die now, his achievement of becoming the first human being to set foot on the moon would remain an unforgettable moment in history.\nI hope this helps you understand the significance and importance of Neil Armstrong's achievement on the moon!"
```

Prompts
-------

Some LLMs will benefit from specific prompts. For example, LLaMA will use [special tokens](https://twitter.com/RLanceMartin/status/1681879318493003776?s=20). We can use `ConditionalPromptSelector` to set the prompt based on the model type.

```python
# Set our LLM
llm = LlamaCpp(
    model_path="/Users/rlm/Desktop/Code/llama.cpp/models/openorca-platypus2-13b.gguf.q4_0.bin",
    n_gpu_layers=1,
    n_batch=512,
    n_ctx=2048,
    f16_kv=True,
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
    verbose=True,
)
```

Set the associated prompt based upon the model version.

```python
from langchain.chains import LLMChain
from langchain.chains.prompt_selector import ConditionalPromptSelector
from langchain_core.prompts import PromptTemplate

DEFAULT_LLAMA_SEARCH_PROMPT = PromptTemplate(
    input_variables=["question"],
    template="""<<SYS>> \n You are an assistant tasked with improving Google search \
results. \n <</SYS>> \n\n [INST] Generate THREE Google search queries that \
are similar to this question. The output should be a numbered list of questions \
and each should have a question mark at the end: \n\n {question} [/INST]""",
)

DEFAULT_SEARCH_PROMPT = PromptTemplate(
    input_variables=["question"],
    template="""You are an assistant tasked with improving Google search \
results. Generate THREE Google search queries that are similar to \
this question. The output should be a numbered list of questions and each \
should have a question mark at the end: {question}""",
)

QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(
    default_prompt=DEFAULT_SEARCH_PROMPT,
    conditionals=[(lambda llm: isinstance(llm, LlamaCpp), DEFAULT_LLAMA_SEARCH_PROMPT)],
)

prompt = QUESTION_PROMPT_SELECTOR.get_prompt(llm)
prompt
```

**API Reference:** [LLMChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.llm.LLMChain.html) | [ConditionalPromptSelector](https://api.python.langchain.com/en/latest/chains/langchain.chains.prompt_selector.ConditionalPromptSelector.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html)

```output
PromptTemplate(input_variables=['question'], output_parser=None, partial_variables={}, template='<<SYS>> \n You are an assistant tasked with improving Google search results. \n <</SYS>> \n\n [INST] Generate THREE Google search queries that are similar to this question. The output should be a numbered list of questions and each should have a question mark at the end: \n\n {question} [/INST]', template_format='f-string', validate_template=True)
```

```python
# Chain
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What NFL team won the Super Bowl in the year that Justin Bieber was born?"
llm_chain.run({"question": question})
```

```output
Sure! Here are three similar search queries with a question mark at the end:

1. Which NBA team did LeBron James lead to a championship in the year he was drafted?
2. Who won the Grammy Awards for Best New Artist and Best Female Pop Vocal Performance in the same year that Lady Gaga was born?
3. What MLB team did Babe Ruth play for when he hit 60 home runs in a single season?
```
```output
llama_print_timings: load time = 14943.19 ms
llama_print_timings: sample time = 72.93 ms / 101 runs ( 0.72 ms per token, 1384.87 tokens per second)
llama_print_timings: prompt eval time = 14942.95 ms / 93 tokens ( 160.68 ms per token, 6.22 tokens per second)
llama_print_timings: eval time = 3430.85 ms / 100 runs ( 34.31 ms per token, 29.15 tokens per second)
llama_print_timings: total time = 18578.26 ms
```
```output
' Sure! Here are three similar search queries with a question mark at the end:\n\n1. Which NBA team did LeBron James lead to a championship in the year he was drafted?\n2. Who won the Grammy Awards for Best New Artist and Best Female Pop Vocal Performance in the same year that Lady Gaga was born?\n3. What MLB team did Babe Ruth play for when he hit 60 home runs in a single season?'
```

We can also use the LangChain Prompt Hub to fetch and / or store prompts that are model-specific. This will work with your [LangSmith API key](https://docs.smith.langchain.com/). For example, [here](https://smith.langchain.com/hub/rlm/rag-prompt-llama) is a prompt for RAG with LLaMA-specific tokens.

Use cases
---------

Given an `llm` created from one of the models above, you can use it for [many use cases](/v0.2/docs/how_to/#use-cases). For example, here is a guide to [RAG](/v0.2/docs/tutorials/local_rag/) with local LLMs.
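For readers who prefer the LangChain Expression Language over the legacy `LLMChain`, here is a minimal sketch (not part of the original page) that wires the selected prompt and local `llm` from the Prompts section into the same pipeline:

```python
# Assumes the `llm` and `QUESTION_PROMPT_SELECTOR` objects defined in the Prompts section above.
prompt = QUESTION_PROMPT_SELECTOR.get_prompt(llm)
chain = prompt | llm  # builds the same prompt -> model pipeline as LLMChain(prompt=prompt, llm=llm)

question = "What NFL team won the Super Bowl in the year that Justin Bieber was born?"
print(chain.invoke({"question": question}))
```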
In general, use cases for local LLMs can be driven by at least two factors:

* `Privacy`: private data (e.g., journals) that a user does not want to share
* `Cost`: text preprocessing (extraction/tagging), summarization, and agent simulations are token-use-intensive tasks

In addition, [here](https://blog.langchain.dev/using-langsmith-to-support-fine-tuning-of-open-source-llms/) is an overview on fine-tuning, which can utilize open-source LLMs.
https://python.langchain.com/v0.2/docs/additional_resources/arxiv_references/
On this page arXiv ===== LangChain implements the latest research in the field of Natural Language Processing. This page contains `arXiv` papers referenced in the LangChain Documentation, API Reference, Templates, and Cookbooks. From the opposite direction, scientists use LangChain in research and reference LangChain in the research papers. Here you find [such papers](https://arxiv.org/search/?query=langchain&searchtype=all&source=header). Summary[​](#summary "Direct link to Summary") --------------------------------------------- arXiv id / Title Authors Published date 🔻 LangChain Documentation `2402.03620v1` [Self-Discover: Large Language Models Self-Compose Reasoning Structures](http://arxiv.org/abs/2402.03620v1) Pei Zhou, Jay Pujara, Xiang Ren, et al. 2024-02-06 `Cookbook:` [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb) `2401.18059v1` [RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval](http://arxiv.org/abs/2401.18059v1) Parth Sarthi, Salman Abdullah, Aditi Tuli, et al. 2024-01-31 `Cookbook:` [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb) `2401.15884v2` [Corrective Retrieval Augmented Generation](http://arxiv.org/abs/2401.15884v2) Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al. 2024-01-29 `Cookbook:` [langgraph\_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb) `2401.04088v1` [Mixtral of Experts](http://arxiv.org/abs/2401.04088v1) Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al. 2024-01-08 `Cookbook:` [together\_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb) `2312.06648v2` [Dense X Retrieval: What Retrieval Granularity Should We Use?](http://arxiv.org/abs/2312.06648v2) Tong Chen, Hongwei Wang, Sihao Chen, et al. 2023-12-11 `Template:` [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval) `2311.09210v1` [Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models](http://arxiv.org/abs/2311.09210v1) Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al. 2023-11-15 `Template:` [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki) `2310.11511v1` [Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection](http://arxiv.org/abs/2310.11511v1) Akari Asai, Zeqiu Wu, Yizhong Wang, et al. 2023-10-17 `Cookbook:` [langgraph\_self\_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb) `2310.06117v2` [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](http://arxiv.org/abs/2310.06117v2) Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al. 2023-10-09 `Template:` [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting), `Cookbook:` [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb) `2307.09288v2` [Llama 2: Open Foundation and Fine-Tuned Chat Models](http://arxiv.org/abs/2307.09288v2) Hugo Touvron, Louis Martin, Kevin Stone, et al. 2023-07-18 `Cookbook:` [Semi\_Structured\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb) `2305.14283v3` [Query Rewriting for Retrieval-Augmented Large Language Models](http://arxiv.org/abs/2305.14283v3) Xinbei Ma, Yeyun Gong, Pengcheng He, et al. 
2023-05-23 `Template:` [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read), `Cookbook:` [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb) `2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1) Jieyi Long 2023-05-15 `API:` [langchain\_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot), `Cookbook:` [tree\_of\_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb) `2305.04091v3` [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](http://arxiv.org/abs/2305.04091v3) Lei Wang, Wanyu Xu, Yihuai Lan, et al. 2023-05-06 `Cookbook:` [plan\_and\_execute\_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb) `2304.08485v2` [Visual Instruction Tuning](http://arxiv.org/abs/2304.08485v2) Haotian Liu, Chunyuan Li, Qingyang Wu, et al. 2023-04-17 `Cookbook:` [Semi\_structured\_and\_multi\_modal\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb), [Semi\_structured\_multi\_modal\_RAG\_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb) `2304.03442v2` [Generative Agents: Interactive Simulacra of Human Behavior](http://arxiv.org/abs/2304.03442v2) Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al. 2023-04-07 `Cookbook:` [multiagent\_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb), [generative\_agents\_interactive\_simulacra\_of\_human\_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb) `2303.17760v2` [CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society](http://arxiv.org/abs/2303.17760v2) Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al. 2023-03-31 `Cookbook:` [camel\_role\_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb) `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) Yongliang Shen, Kaitao Song, Xu Tan, et al. 2023-03-30 `API:` [langchain\_experimental.autonomous\_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents), `Cookbook:` [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb) `2303.08774v6` [GPT-4 Technical Report](http://arxiv.org/abs/2303.08774v6) OpenAI, Josh Achiam, Steven Adler, et al. 2023-03-15 `Docs:` [docs/integrations/vectorstores/mongodb\_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas) `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. 
2023-01-24 `API:` [langchain\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference) `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) Luyu Gao, Xueguang Ma, Jimmy Lin, et al. 2022-12-20 `API:` [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder), `Template:` [hyde](https://python.langchain.com/docs/templates/hyde), `Cookbook:` [hypothetical\_document\_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb) `2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3) Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al. 2022-12-12 `API:` [langchain\_experimental.fallacy\_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal) `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. 2022-11-25 `API:` [langchain\_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector) `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) Luyu Gao, Aman Madaan, Shuyan Zhou, et al. 2022-11-18 `API:` [langchain\_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain\_experimental.pal\_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), `Cookbook:` [program\_aided\_language\_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb) `2210.03629v3` [ReAct: Synergizing Reasoning and Acting in Language Models](http://arxiv.org/abs/2210.03629v3) Shunyu Yao, Jeffrey Zhao, Dian Yu, et al. 
2022-10-06 `Docs:` [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface), [docs/integrations/tools/ionic\_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping), `API:` [langchain...create\_react\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain) `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. 2022-09-22 `Docs:` [docs/integrations/providers/activeloop\_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake) `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) Kevin Heffernan, Onur Çelebi, Holger Schwenk 2022-05-25 `API:` [langchain\_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings) `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau 2022-03-15 `API:` [langchain\_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain\_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase) `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) Clara Meister, Tiago Pimentel, Gian Wiher, et al. 2022-02-01 `API:` [langchain\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference) `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) Alec Radford, Jong Wook Kim, Chris Hallacy, et al. 2021-02-26 `API:` [langchain\_experimental.open\_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip) `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. 
2019-09-11 `API:` [langchain\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference) `1908.10084v1` [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](http://arxiv.org/abs/1908.10084v1) Nils Reimers, Iryna Gurevych 2019-08-27 `Docs:` [docs/integrations/text\_embedding/sentence\_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers) Self-Discover: Large Language Models Self-Compose Reasoning Structures[​](#self-discover-large-language-models-self-compose-reasoning-structures "Direct link to Self-Discover: Large Language Models Self-Compose Reasoning Structures") ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- * **arXiv id:** 2402.03620v1 * **Title:** Self-Discover: Large Language Models Self-Compose Reasoning Structures * **Authors:** Pei Zhou, Jay Pujara, Xiang Ren, et al. * **Published Date:** 2024-02-06 * **URL:** [http://arxiv.org/abs/2402.03620v1](http://arxiv.org/abs/2402.03620v1) * **LangChain:** * **Cookbook:** [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb) **Abstract:** We introduce SELF-DISCOVER, a general framework for LLMs to self-discover the task-intrinsic reasoning structures to tackle complex reasoning problems that are challenging for typical prompting methods. Core to the framework is a self-discovery process where LLMs select multiple atomic reasoning modules such as critical thinking and step-by-step thinking, and compose them into an explicit reasoning structure for LLMs to follow during decoding. SELF-DISCOVER substantially improves GPT-4 and PaLM 2's performance on challenging reasoning benchmarks such as BigBench-Hard, grounded agent reasoning, and MATH, by as much as 32% compared to Chain of Thought (CoT). Furthermore, SELF-DISCOVER outperforms inference-intensive methods such as CoT-Self-Consistency by more than 20%, while requiring 10-40x fewer inference compute. Finally, we show that the self-discovered reasoning structures are universally applicable across model families: from PaLM 2-L to GPT-4, and from GPT-4 to Llama2, and share commonalities with human reasoning patterns. 
RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval[​](#raptor-recursive-abstractive-processing-for-tree-organized-retrieval "Direct link to RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval") -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- * **arXiv id:** 2401.18059v1 * **Title:** RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval * **Authors:** Parth Sarthi, Salman Abdullah, Aditi Tuli, et al. * **Published Date:** 2024-01-31 * **URL:** [http://arxiv.org/abs/2401.18059v1](http://arxiv.org/abs/2401.18059v1) * **LangChain:** * **Cookbook:** [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb) **Abstract:** Retrieval-augmented language models can better adapt to changes in world state and incorporate long-tail knowledge. However, most existing methods retrieve only short contiguous chunks from a retrieval corpus, limiting holistic understanding of the overall document context. We introduce the novel approach of recursively embedding, clustering, and summarizing chunks of text, constructing a tree with differing levels of summarization from the bottom up. At inference time, our RAPTOR model retrieves from this tree, integrating information across lengthy documents at different levels of abstraction. Controlled experiments show that retrieval with recursive summaries offers significant improvements over traditional retrieval-augmented LMs on several tasks. On question-answering tasks that involve complex, multi-step reasoning, we show state-of-the-art results; for example, by coupling RAPTOR retrieval with the use of GPT-4, we can improve the best performance on the QuALITY benchmark by 20% in absolute accuracy. Corrective Retrieval Augmented Generation[​](#corrective-retrieval-augmented-generation "Direct link to Corrective Retrieval Augmented Generation") --------------------------------------------------------------------------------------------------------------------------------------------------- * **arXiv id:** 2401.15884v2 * **Title:** Corrective Retrieval Augmented Generation * **Authors:** Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al. * **Published Date:** 2024-01-29 * **URL:** [http://arxiv.org/abs/2401.15884v2](http://arxiv.org/abs/2401.15884v2) * **LangChain:** * **Cookbook:** [langgraph\_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb) **Abstract:** Large language models (LLMs) inevitably exhibit hallucinations since the accuracy of generated texts cannot be secured solely by the parametric knowledge they encapsulate. Although retrieval-augmented generation (RAG) is a practicable complement to LLMs, it relies heavily on the relevance of retrieved documents, raising concerns about how the model behaves if retrieval goes wrong. To this end, we propose the Corrective Retrieval Augmented Generation (CRAG) to improve the robustness of generation. Specifically, a lightweight retrieval evaluator is designed to assess the overall quality of retrieved documents for a query, returning a confidence degree based on which different knowledge retrieval actions can be triggered. Since retrieval from static and limited corpora can only return sub-optimal documents, large-scale web searches are utilized as an extension for augmenting the retrieval results. 
Besides, a decompose-then-recompose algorithm is designed for retrieved documents to selectively focus on key information and filter out irrelevant information in them. CRAG is plug-and-play and can be seamlessly coupled with various RAG-based approaches. Experiments on four datasets covering short- and long-form generation tasks show that CRAG can significantly improve the performance of RAG-based approaches. Mixtral of Experts[​](#mixtral-of-experts "Direct link to Mixtral of Experts") ------------------------------------------------------------------------------ * **arXiv id:** 2401.04088v1 * **Title:** Mixtral of Experts * **Authors:** Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al. * **Published Date:** 2024-01-08 * **URL:** [http://arxiv.org/abs/2401.04088v1](http://arxiv.org/abs/2401.04088v1) * **LangChain:** * **Cookbook:** [together\_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb) **Abstract:** We introduce Mixtral 8x7B, a Sparse Mixture of Experts (SMoE) language model. Mixtral has the same architecture as Mistral 7B, with the difference that each layer is composed of 8 feedforward blocks (i.e. experts). For every token, at each layer, a router network selects two experts to process the current state and combine their outputs. Even though each token only sees two experts, the selected experts can be different at each timestep. As a result, each token has access to 47B parameters, but only uses 13B active parameters during inference. Mixtral was trained with a context size of 32k tokens and it outperforms or matches Llama 2 70B and GPT-3.5 across all evaluated benchmarks. In particular, Mixtral vastly outperforms Llama 2 70B on mathematics, code generation, and multilingual benchmarks. We also provide a model fine-tuned to follow instructions, Mixtral 8x7B - Instruct, that surpasses GPT-3.5 Turbo, Claude-2.1, Gemini Pro, and Llama 2 70B - chat model on human benchmarks. Both the base and instruct models are released under the Apache 2.0 license. Dense X Retrieval: What Retrieval Granularity Should We Use?[​](#dense-x-retrieval-what-retrieval-granularity-should-we-use "Direct link to Dense X Retrieval: What Retrieval Granularity Should We Use?") ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- * **arXiv id:** 2312.06648v2 * **Title:** Dense X Retrieval: What Retrieval Granularity Should We Use? * **Authors:** Tong Chen, Hongwei Wang, Sihao Chen, et al. * **Published Date:** 2023-12-11 * **URL:** [http://arxiv.org/abs/2312.06648v2](http://arxiv.org/abs/2312.06648v2) * **LangChain:** * **Template:** [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval) **Abstract:** Dense retrieval has become a prominent method to obtain relevant context or world knowledge in open-domain NLP tasks. When we use a learned dense retriever on a retrieval corpus at inference time, an often-overlooked design choice is the retrieval unit in which the corpus is indexed, e.g. document, passage, or sentence. We discover that the retrieval unit choice significantly impacts the performance of both retrieval and downstream tasks. Distinct from the typical approach of using passages or sentences, we introduce a novel retrieval unit, proposition, for dense retrieval. 
Propositions are defined as atomic expressions within text, each encapsulating a distinct factoid and presented in a concise, self-contained natural language format. We conduct an empirical comparison of different retrieval granularities. Our results reveal that proposition-based retrieval significantly outperforms traditional passage or sentence-based methods in dense retrieval. Moreover, retrieval by proposition also enhances the performance of downstream QA tasks, since the retrieved texts are more condensed with question-relevant information, reducing the need for lengthy input tokens and minimizing the inclusion of extraneous, irrelevant information.

Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models[​](#chain-of-note-enhancing-robustness-in-retrieval-augmented-language-models "Direct link to Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models")
------------------------------------------------------------------------

* **arXiv id:** 2311.09210v1
* **Title:** Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models
* **Authors:** Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al.
* **Published Date:** 2023-11-15
* **URL:** [http://arxiv.org/abs/2311.09210v1](http://arxiv.org/abs/2311.09210v1)
* **LangChain:**
  * **Template:** [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)

**Abstract:** Retrieval-augmented language models (RALMs) represent a substantial advancement in the capabilities of large language models, notably in reducing factual hallucination by leveraging external knowledge sources. However, the reliability of the retrieved information is not always guaranteed. The retrieval of irrelevant data can lead to misguided responses, potentially causing the model to overlook its inherent knowledge, even when it possesses adequate information to address the query. Moreover, standard RALMs often struggle to assess whether they possess adequate knowledge, both intrinsic and retrieved, to provide an accurate answer. In situations where knowledge is lacking, these systems should ideally respond with "unknown" when the answer is unattainable. In response to these challenges, we introduce Chain-of-Noting (CoN), a novel approach aimed at improving the robustness of RALMs in facing noisy, irrelevant documents and in handling unknown scenarios. The core idea of CoN is to generate sequential reading notes for retrieved documents, enabling a thorough evaluation of their relevance to the given question and integrating this information to formulate the final answer. We employed ChatGPT to create training data for CoN, which was subsequently used to train a LLaMa-2 7B model. Our experiments across four open-domain QA benchmarks show that RALMs equipped with CoN significantly outperform standard RALMs. Notably, CoN achieves an average improvement of +7.9 in EM score given entirely noisy retrieved documents and +10.5 in rejection rates for real-time questions that fall outside the pre-training knowledge scope.
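The [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki) template pairs this prompting pattern with a Wikipedia retriever. Below is a minimal standalone sketch of the note-then-answer step (retrieval is omitted, and the passages and prompt wording are illustrative rather than the template's own), assuming an OpenAI chat model via `langchain_openai`.

```python
# Chain-of-Note-style prompting: write a reading note per retrieved passage,
# then answer only from relevant notes, or reply "unknown".
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)  # placeholder model name

prompt = ChatPromptTemplate.from_template(
    "Question: {question}\n\n"
    "Passages:\n{passages}\n\n"
    "First write a short reading note for each passage assessing its relevance to the question. "
    "Then answer the question using only the relevant notes. "
    "If the notes are insufficient, answer exactly: unknown."
)
chain = prompt | llm | StrOutputParser()

passages = [
    "The Eiffel Tower was completed in 1889 for the World's Fair in Paris.",
    "Mount Everest is the highest mountain above sea level.",  # deliberately irrelevant noise
]
print(chain.invoke({
    "question": "When was the Eiffel Tower completed?",
    "passages": "\n".join(f"[{i + 1}] {p}" for i, p in enumerate(passages)),
}))
```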
Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection[​](#self-rag-learning-to-retrieve-generate-and-critique-through-self-reflection "Direct link to Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection") --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- * **arXiv id:** 2310.11511v1 * **Title:** Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection * **Authors:** Akari Asai, Zeqiu Wu, Yizhong Wang, et al. * **Published Date:** 2023-10-17 * **URL:** [http://arxiv.org/abs/2310.11511v1](http://arxiv.org/abs/2310.11511v1) * **LangChain:** * **Cookbook:** [langgraph\_self\_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb) **Abstract:** Despite their remarkable capabilities, large language models (LLMs) often produce responses containing factual inaccuracies due to their sole reliance on the parametric knowledge they encapsulate. Retrieval-Augmented Generation (RAG), an ad hoc approach that augments LMs with retrieval of relevant knowledge, decreases such issues. However, indiscriminately retrieving and incorporating a fixed number of retrieved passages, regardless of whether retrieval is necessary, or passages are relevant, diminishes LM versatility or can lead to unhelpful response generation. We introduce a new framework called Self-Reflective Retrieval-Augmented Generation (Self-RAG) that enhances an LM's quality and factuality through retrieval and self-reflection. Our framework trains a single arbitrary LM that adaptively retrieves passages on-demand, and generates and reflects on retrieved passages and its own generations using special tokens, called reflection tokens. Generating reflection tokens makes the LM controllable during the inference phase, enabling it to tailor its behavior to diverse task requirements. Experiments show that Self-RAG (7B and 13B parameters) significantly outperforms state-of-the-art LLMs and retrieval-augmented models on a diverse set of tasks. Specifically, Self-RAG outperforms ChatGPT and retrieval-augmented Llama2-chat on Open-domain QA, reasoning and fact verification tasks, and it shows significant gains in improving factuality and citation accuracy for long-form generations relative to these models. Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models[​](#take-a-step-back-evoking-reasoning-via-abstraction-in-large-language-models "Direct link to Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models") ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- * **arXiv id:** 2310.06117v2 * **Title:** Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models * **Authors:** Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al. 
* **Published Date:** 2023-10-09 * **URL:** [http://arxiv.org/abs/2310.06117v2](http://arxiv.org/abs/2310.06117v2) * **LangChain:** * **Template:** [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting) * **Cookbook:** [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb) **Abstract:** We present Step-Back Prompting, a simple prompting technique that enables LLMs to do abstractions to derive high-level concepts and first principles from instances containing specific details. Using the concepts and principles to guide reasoning, LLMs significantly improve their abilities in following a correct reasoning path towards the solution. We conduct experiments of Step-Back Prompting with PaLM-2L, GPT-4 and Llama2-70B models, and observe substantial performance gains on various challenging reasoning-intensive tasks including STEM, Knowledge QA, and Multi-Hop Reasoning. For instance, Step-Back Prompting improves PaLM-2L performance on MMLU (Physics and Chemistry) by 7% and 11% respectively, TimeQA by 27%, and MuSiQue by 7%. Llama 2: Open Foundation and Fine-Tuned Chat Models[​](#llama-2-open-foundation-and-fine-tuned-chat-models "Direct link to Llama 2: Open Foundation and Fine-Tuned Chat Models") -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- * **arXiv id:** 2307.09288v2 * **Title:** Llama 2: Open Foundation and Fine-Tuned Chat Models * **Authors:** Hugo Touvron, Louis Martin, Kevin Stone, et al. * **Published Date:** 2023-07-18 * **URL:** [http://arxiv.org/abs/2307.09288v2](http://arxiv.org/abs/2307.09288v2) * **LangChain:** * **Cookbook:** [Semi\_Structured\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb) **Abstract:** In this work, we develop and release Llama 2, a collection of pretrained and fine-tuned large language models (LLMs) ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama 2-Chat, are optimized for dialogue use cases. Our models outperform open-source chat models on most benchmarks we tested, and based on our human evaluations for helpfulness and safety, may be a suitable substitute for closed-source models. We provide a detailed description of our approach to fine-tuning and safety improvements of Llama 2-Chat in order to enable the community to build on our work and contribute to the responsible development of LLMs. Query Rewriting for Retrieval-Augmented Large Language Models[​](#query-rewriting-for-retrieval-augmented-large-language-models "Direct link to Query Rewriting for Retrieval-Augmented Large Language Models") --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- * **arXiv id:** 2305.14283v3 * **Title:** Query Rewriting for Retrieval-Augmented Large Language Models * **Authors:** Xinbei Ma, Yeyun Gong, Pengcheng He, et al. 
* **Published Date:** 2023-05-23
* **URL:** [http://arxiv.org/abs/2305.14283v3](http://arxiv.org/abs/2305.14283v3)
* **LangChain:**
  * **Template:** [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read)
  * **Cookbook:** [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb)

**Abstract:** Large Language Models (LLMs) play powerful, black-box readers in the retrieve-then-read pipeline, making remarkable progress in knowledge-intensive tasks. This work introduces a new framework, Rewrite-Retrieve-Read, instead of the previous retrieve-then-read, for retrieval-augmented LLMs from the perspective of query rewriting. Unlike prior studies focusing on adapting either the retriever or the reader, our approach pays attention to the adaptation of the search query itself, for there is inevitably a gap between the input text and the needed knowledge in retrieval. We first prompt an LLM to generate the query, then use a web search engine to retrieve contexts. Furthermore, to better align the query to the frozen modules, we propose a trainable scheme for our pipeline. A small language model is adopted as a trainable rewriter to cater to the black-box LLM reader. The rewriter is trained using the feedback of the LLM reader by reinforcement learning. Evaluation is conducted on downstream tasks, open-domain QA and multiple-choice QA. Experimental results show consistent performance improvement, indicating that our framework is effective and scalable, and brings a new framework for retrieval-augmented LLMs.

Large Language Model Guided Tree-of-Thought[​](#large-language-model-guided-tree-of-thought "Direct link to Large Language Model Guided Tree-of-Thought")
------------------------------------------------------------------------

* **arXiv id:** 2305.08291v1
* **Title:** Large Language Model Guided Tree-of-Thought
* **Authors:** Jieyi Long
* **Published Date:** 2023-05-15
* **URL:** [http://arxiv.org/abs/2305.08291v1](http://arxiv.org/abs/2305.08291v1)
* **LangChain:**
  * **API Reference:** [langchain\_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot)
  * **Cookbook:** [tree\_of\_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)

**Abstract:** In this paper, we introduce the Tree-of-Thought (ToT) framework, a novel approach aimed at improving the problem-solving capabilities of auto-regressive large language models (LLMs). The ToT technique is inspired by the human mind's approach for solving complex reasoning tasks through trial and error. In this process, the human mind explores the solution space through a tree-like thought process, allowing for backtracking when necessary. To implement ToT as a software system, we augment an LLM with additional modules including a prompter agent, a checker module, a memory module, and a ToT controller. In order to solve a given problem, these modules engage in a multi-round conversation with the LLM. The memory module records the conversation and state history of the problem-solving process, which allows the system to backtrack to previous steps of the thought process and explore other directions from there. To verify the effectiveness of the proposed technique, we implemented a ToT-based solver for the Sudoku Puzzle. Experimental results show that the ToT framework can significantly increase the success rate of Sudoku puzzle solving. Our implementation of the ToT-based Sudoku solver is available on GitHub: [https://github.com/jieyilong/tree-of-thought-puzzle-solver](https://github.com/jieyilong/tree-of-thought-puzzle-solver).

Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models[​](#plan-and-solve-prompting-improving-zero-shot-chain-of-thought-reasoning-by-large-language-models "Direct link to Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models")
------------------------------------------------------------------------

* **arXiv id:** 2305.04091v3
* **Title:** Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models
* **Authors:** Lei Wang, Wanyu Xu, Yihuai Lan, et al.
* **Published Date:** 2023-05-06
* **URL:** [http://arxiv.org/abs/2305.04091v3](http://arxiv.org/abs/2305.04091v3)
* **LangChain:**
  * **Cookbook:** [plan\_and\_execute\_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)

**Abstract:** Large language models (LLMs) have recently been shown to deliver impressive performance in various NLP tasks. To tackle multi-step reasoning tasks, few-shot chain-of-thought (CoT) prompting includes a few manually crafted step-by-step reasoning demonstrations which enable LLMs to explicitly generate reasoning steps and improve their reasoning task accuracy. To eliminate the manual effort, Zero-shot-CoT concatenates the target problem statement with "Let's think step by step" as an input prompt to LLMs. Despite the success of Zero-shot-CoT, it still suffers from three pitfalls: calculation errors, missing-step errors, and semantic misunderstanding errors. To address the missing-step errors, we propose Plan-and-Solve (PS) Prompting. It consists of two components: first, devising a plan to divide the entire task into smaller subtasks, and then carrying out the subtasks according to the plan. To address the calculation errors and improve the quality of generated reasoning steps, we extend PS prompting with more detailed instructions and derive PS+ prompting. We evaluate our proposed prompting strategy on ten datasets across three reasoning problems. The experimental results over GPT-3 show that our proposed zero-shot prompting consistently outperforms Zero-shot-CoT across all datasets by a large margin, is comparable to or exceeds Zero-shot-Program-of-Thought Prompting, and has comparable performance with 8-shot CoT prompting on the math reasoning problem. The code can be found at [https://github.com/AGI-Edgerunners/Plan-and-Solve-Prompting](https://github.com/AGI-Edgerunners/Plan-and-Solve-Prompting).

Visual Instruction Tuning[​](#visual-instruction-tuning "Direct link to Visual Instruction Tuning")
------------------------------------------------------------------------

* **arXiv id:** 2304.08485v2
* **Title:** Visual Instruction Tuning
* **Authors:** Haotian Liu, Chunyuan Li, Qingyang Wu, et al.
* **Published Date:** 2023-04-17
* **URL:** [http://arxiv.org/abs/2304.08485v2](http://arxiv.org/abs/2304.08485v2)
* **LangChain:**
  * **Cookbook:** [Semi\_structured\_and\_multi\_modal\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb), [Semi\_structured\_multi\_modal\_RAG\_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb)

**Abstract:** Instruction tuning large language models (LLMs) using machine-generated instruction-following data has improved zero-shot capabilities on new tasks, but the idea is less explored in the multimodal field. In this paper, we present the first attempt to use language-only GPT-4 to generate multimodal language-image instruction-following data. By instruction tuning on such generated data, we introduce LLaVA: Large Language and Vision Assistant, an end-to-end trained large multimodal model that connects a vision encoder and LLM for general-purpose visual and language understanding. Our early experiments show that LLaVA demonstrates impressive multimodal chat abilities, sometimes exhibiting the behaviors of multimodal GPT-4 on unseen images/instructions, and yields an 85.1% relative score compared with GPT-4 on a synthetic multimodal instruction-following dataset. When fine-tuned on Science QA, the synergy of LLaVA and GPT-4 achieves a new state-of-the-art accuracy of 92.53%. We make GPT-4 generated visual instruction tuning data, our model and code base publicly available.

Generative Agents: Interactive Simulacra of Human Behavior[​](#generative-agents-interactive-simulacra-of-human-behavior "Direct link to Generative Agents: Interactive Simulacra of Human Behavior")
------------------------------------------------------------------------

* **arXiv id:** 2304.03442v2
* **Title:** Generative Agents: Interactive Simulacra of Human Behavior
* **Authors:** Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al.
* **Published Date:** 2023-04-07
* **URL:** [http://arxiv.org/abs/2304.03442v2](http://arxiv.org/abs/2304.03442v2)
* **LangChain:**
  * **Cookbook:** [multiagent\_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb), [generative\_agents\_interactive\_simulacra\_of\_human\_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb)

**Abstract:** Believable proxies of human behavior can empower interactive applications ranging from immersive environments to rehearsal spaces for interpersonal communication to prototyping tools. In this paper, we introduce generative agents--computational software agents that simulate believable human behavior. Generative agents wake up, cook breakfast, and head to work; artists paint, while authors write; they form opinions, notice each other, and initiate conversations; they remember and reflect on days past as they plan the next day. To enable generative agents, we describe an architecture that extends a large language model to store a complete record of the agent's experiences using natural language, synthesize those memories over time into higher-level reflections, and retrieve them dynamically to plan behavior.
We instantiate generative agents to populate an interactive sandbox environment inspired by The Sims, where end users can interact with a small town of twenty five agents using natural language. In an evaluation, these generative agents produce believable individual and emergent social behaviors: for example, starting with only a single user-specified notion that one agent wants to throw a Valentine's Day party, the agents autonomously spread invitations to the party over the next two days, make new acquaintances, ask each other out on dates to the party, and coordinate to show up for the party together at the right time. We demonstrate through ablation that the components of our agent architecture--observation, planning, and reflection--each contribute critically to the believability of agent behavior. By fusing large language models with computational, interactive agents, this work introduces architectural and interaction patterns for enabling believable simulations of human behavior. CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society[​](#camel-communicative-agents-for-mind-exploration-of-large-language-model-society "Direct link to CAMEL: Communicative Agents for \"Mind\" Exploration of Large Language Model Society") ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- * **arXiv id:** 2303.17760v2 * **Title:** CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society * **Authors:** Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al. * **Published Date:** 2023-03-31 * **URL:** [http://arxiv.org/abs/2303.17760v2](http://arxiv.org/abs/2303.17760v2) * **LangChain:** * **Cookbook:** [camel\_role\_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb) **Abstract:** The rapid advancement of chat-based language models has led to remarkable progress in complex task-solving. However, their success heavily relies on human input to guide the conversation, which can be challenging and time-consuming. This paper explores the potential of building scalable techniques to facilitate autonomous cooperation among communicative agents, and provides insight into their "cognitive" processes. To address the challenges of achieving autonomous cooperation, we propose a novel communicative agent framework named role-playing. Our approach involves using inception prompting to guide chat agents toward task completion while maintaining consistency with human intentions. We showcase how role-playing can be used to generate conversational data for studying the behaviors and capabilities of a society of agents, providing a valuable resource for investigating conversational language models. In particular, we conduct comprehensive studies on instruction-following cooperation in multi-agent settings. Our contributions include introducing a novel communicative agent framework, offering a scalable approach for studying the cooperative behaviors and capabilities of multi-agent systems, and open-sourcing our library to support research on communicative agents and beyond: [https://github.com/camel-ai/camel](https://github.com/camel-ai/camel). 
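The [camel\_role\_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb) cookbook builds a full `CAMELAgent` class with inception prompts and termination handling; the loop below is a heavily reduced sketch of the same role-playing pattern (the roles, system prompts, and turn count are illustrative), assuming an OpenAI chat model via `langchain_openai`.

```python
# Two roles played by one chat model; each role keeps its own message history,
# and each reply from one role is fed to the other as a human message.
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.2)  # placeholder model name

user_history = [
    SystemMessage(content="You are a stock trader. Give the programmer one concise instruction per reply."),
    HumanMessage(content="The task is to build a simple trading bot. Give your first instruction."),
]
assistant_history = [
    SystemMessage(content="You are a Python programmer. Carry out each instruction you receive, one step per reply."),
]

for turn in range(3):  # a few turns are enough to see the pattern
    instruction = llm.invoke(user_history).content       # the "user" role speaks
    user_history.append(AIMessage(content=instruction))
    assistant_history.append(HumanMessage(content=instruction))

    solution = llm.invoke(assistant_history).content      # the "assistant" role responds
    assistant_history.append(AIMessage(content=solution))
    user_history.append(HumanMessage(content=solution))

    print(f"--- turn {turn + 1} ---\n{instruction}\n{solution}\n")
```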
HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face[​](#hugginggpt-solving-ai-tasks-with-chatgpt-and-its-friends-in-hugging-face "Direct link to HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face") -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- * **arXiv id:** 2303.17580v4 * **Title:** HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face * **Authors:** Yongliang Shen, Kaitao Song, Xu Tan, et al. * **Published Date:** 2023-03-30 * **URL:** [http://arxiv.org/abs/2303.17580v4](http://arxiv.org/abs/2303.17580v4) * **LangChain:** * **API Reference:** [langchain\_experimental.autonomous\_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents) * **Cookbook:** [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb) **Abstract:** Solving complicated AI tasks with different domains and modalities is a key step toward artificial general intelligence. While there are numerous AI models available for various domains and modalities, they cannot handle complicated AI tasks autonomously. Considering large language models (LLMs) have exhibited exceptional abilities in language understanding, generation, interaction, and reasoning, we advocate that LLMs could act as a controller to manage existing AI models to solve complicated AI tasks, with language serving as a generic interface to empower this. Based on this philosophy, we present HuggingGPT, an LLM-powered agent that leverages LLMs (e.g., ChatGPT) to connect various AI models in machine learning communities (e.g., Hugging Face) to solve AI tasks. Specifically, we use ChatGPT to conduct task planning when receiving a user request, select models according to their function descriptions available in Hugging Face, execute each subtask with the selected AI model, and summarize the response according to the execution results. By leveraging the strong language capability of ChatGPT and abundant AI models in Hugging Face, HuggingGPT can tackle a wide range of sophisticated AI tasks spanning different modalities and domains and achieve impressive results in language, vision, speech, and other challenging tasks, which paves a new way towards the realization of artificial general intelligence. GPT-4 Technical Report[​](#gpt-4-technical-report "Direct link to GPT-4 Technical Report") ------------------------------------------------------------------------------------------ * **arXiv id:** 2303.08774v6 * **Title:** GPT-4 Technical Report * **Authors:** OpenAI, Josh Achiam, Steven Adler, et al. * **Published Date:** 2023-03-15 * **URL:** [http://arxiv.org/abs/2303.08774v6](http://arxiv.org/abs/2303.08774v6) * **LangChain:** * **Documentation:** [docs/integrations/vectorstores/mongodb\_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas) **Abstract:** We report the development of GPT-4, a large-scale, multimodal model which can accept image and text inputs and produce text outputs. While less capable than humans in many real-world scenarios, GPT-4 exhibits human-level performance on various professional and academic benchmarks, including passing a simulated bar exam with a score around the top 10% of test takers. 
GPT-4 is a Transformer-based model pre-trained to predict the next token in a document. The post-training alignment process results in improved performance on measures of factuality and adherence to desired behavior. A core component of this project was developing infrastructure and optimization methods that behave predictably across a wide range of scales. This allowed us to accurately predict some aspects of GPT-4's performance based on models trained with no more than 1/1,000th the compute of GPT-4. A Watermark for Large Language Models[​](#a-watermark-for-large-language-models "Direct link to A Watermark for Large Language Models") --------------------------------------------------------------------------------------------------------------------------------------- * **arXiv id:** 2301.10226v4 * **Title:** A Watermark for Large Language Models * **Authors:** John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. * **Published Date:** 2023-01-24 * **URL:** [http://arxiv.org/abs/2301.10226v4](http://arxiv.org/abs/2301.10226v4) * **LangChain:** * **API Reference:** [langchain\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference) **Abstract:** Potential harms of large language models can be mitigated by watermarking model output, i.e., embedding signals into generated text that are invisible to humans but algorithmically detectable from a short span of tokens. We propose a watermarking framework for proprietary language models. The watermark can be embedded with negligible impact on text quality, and can be detected using an efficient open-source algorithm without access to the language model API or parameters. The watermark works by selecting a randomized set of "green" tokens before a word is generated, and then softly promoting use of green tokens during sampling. We propose a statistical test for detecting the watermark with interpretable p-values, and derive an information-theoretic framework for analyzing the sensitivity of the watermark. We test the watermark using a multi-billion parameter model from the Open Pretrained Transformer (OPT) family, and discuss robustness and security. 
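The endpoint classes referenced above expose this green-list watermark as a generation flag when the backing server is Hugging Face text-generation-inference. A minimal sketch with `HuggingFaceEndpoint` follows; the model id and token are placeholders, and the hosting endpoint must actually support the `watermark` option.

```python
# Watermarked sampling via a TGI-backed endpoint: the watermark flag softly
# promotes "green" tokens during generation, as described in the paper.
from langchain_huggingface import HuggingFaceEndpoint

llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.2",  # placeholder: any TGI-served model
    max_new_tokens=128,
    do_sample=True,
    watermark=True,
    huggingfacehub_api_token="hf_...",  # placeholder token
)
print(llm.invoke("Write two sentences about tide pools."))
```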
Precise Zero-Shot Dense Retrieval without Relevance Labels[​](#precise-zero-shot-dense-retrieval-without-relevance-labels "Direct link to Precise Zero-Shot Dense Retrieval without Relevance Labels")
------------------------------------------------------------------------

* **arXiv id:** 2212.10496v1
* **Title:** Precise Zero-Shot Dense Retrieval without Relevance Labels
* **Authors:** Luyu Gao, Xueguang Ma, Jimmy Lin, et al.
* **Published Date:** 2022-12-20
* **URL:** [http://arxiv.org/abs/2212.10496v1](http://arxiv.org/abs/2212.10496v1)
* **LangChain:**
  * **API Reference:** [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)
  * **Template:** [hyde](https://python.langchain.com/docs/templates/hyde)
  * **Cookbook:** [hypothetical\_document\_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)

**Abstract:** While dense retrieval has been shown effective and efficient across tasks and languages, it remains difficult to create effective fully zero-shot dense retrieval systems when no relevance label is available. In this paper, we recognize the difficulty of zero-shot learning and encoding relevance. Instead, we propose to pivot through Hypothetical Document Embeddings (HyDE). Given a query, HyDE first zero-shot instructs an instruction-following language model (e.g. InstructGPT) to generate a hypothetical document. The document captures relevance patterns but is unreal and may contain false details. Then, an unsupervised contrastively learned encoder (e.g. Contriever) encodes the document into an embedding vector. This vector identifies a neighborhood in the corpus embedding space, where similar real documents are retrieved based on vector similarity. This second step grounds the generated document to the actual corpus, with the encoder's dense bottleneck filtering out the incorrect details. Our experiments show that HyDE significantly outperforms the state-of-the-art unsupervised dense retriever Contriever and shows strong performance comparable to fine-tuned retrievers across various tasks (e.g. web search, QA, fact verification) and languages (e.g. sw, ko, ja).

Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments[​](#robust-and-explainable-identification-of-logical-fallacies-in-natural-language-arguments "Direct link to Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments")
------------------------------------------------------------------------

* **arXiv id:** 2212.07425v3
* **Title:** Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments
* **Authors:** Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al.
* **Published Date:** 2022-12-12 * **URL:** [http://arxiv.org/abs/2212.07425v3](http://arxiv.org/abs/2212.07425v3) * **LangChain:** * **API Reference:** [langchain\_experimental.fallacy\_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal) **Abstract:** The spread of misinformation, propaganda, and flawed argumentation has been amplified in the Internet era. Given the volume of data and the subtlety of identifying violations of argumentation norms, supporting information analytics tasks, like content moderation, with trustworthy methods that can identify logical fallacies is essential. In this paper, we formalize prior theoretical work on logical fallacies into a comprehensive three-stage evaluation framework of detection, coarse-grained, and fine-grained classification. We adapt existing evaluation datasets for each stage of the evaluation. We employ three families of robust and explainable methods based on prototype reasoning, instance-based reasoning, and knowledge injection. The methods combine language models with background knowledge and explainable mechanisms. Moreover, we address data sparsity with strategies for data augmentation and curriculum learning. Our three-stage framework natively consolidates prior datasets and methods from existing tasks, like propaganda detection, serving as an overarching evaluation testbed. We extensively evaluate these methods on our datasets, focusing on their robustness and explainability. Our results provide insight into the strengths and weaknesses of the methods on different components and fallacy classes, indicating that fallacy identification is a challenging task that may require specialized forms of reasoning to capture various classes. We share our open-source code and data on GitHub to support further work on logical fallacy identification. Complementary Explanations for Effective In-Context Learning[​](#complementary-explanations-for-effective-in-context-learning "Direct link to Complementary Explanations for Effective In-Context Learning") ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ * **arXiv id:** 2211.13892v2 * **Title:** Complementary Explanations for Effective In-Context Learning * **Authors:** Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. * **Published Date:** 2022-11-25 * **URL:** [http://arxiv.org/abs/2211.13892v2](http://arxiv.org/abs/2211.13892v2) * **LangChain:** * **API Reference:** [langchain\_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector) **Abstract:** Large language models (LLMs) have exhibited remarkable capabilities in learning from explanations in prompts, but there has been limited understanding of exactly how these explanations function or why they are effective. This work aims to better understand the mechanisms by which explanations are used for in-context learning. We first study the impact of two different factors on the performance of prompts with explanations: the computation trace (the way the solution is decomposed) and the natural language used to express the prompt. 
By perturbing explanations on three controlled tasks, we show that both factors contribute to the effectiveness of explanations. We further study how to form maximally effective sets of explanations for solving a given test query. We find that LLMs can benefit from the complementarity of the explanation set: diverse reasoning skills shown by different exemplars can lead to better performance. Therefore, we propose a maximal marginal relevance-based exemplar selection approach for constructing exemplar sets that are both relevant as well as complementary, which successfully improves the in-context learning performance across three real-world tasks on multiple LLMs. PAL: Program-aided Language Models[​](#pal-program-aided-language-models "Direct link to PAL: Program-aided Language Models") ----------------------------------------------------------------------------------------------------------------------------- * **arXiv id:** 2211.10435v2 * **Title:** PAL: Program-aided Language Models * **Authors:** Luyu Gao, Aman Madaan, Shuyan Zhou, et al. * **Published Date:** 2022-11-18 * **URL:** [http://arxiv.org/abs/2211.10435v2](http://arxiv.org/abs/2211.10435v2) * **LangChain:** * **API Reference:** [langchain\_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain\_experimental.pal\_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain) * **Cookbook:** [program\_aided\_language\_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb) **Abstract:** Large language models (LLMs) have recently demonstrated an impressive ability to perform arithmetic and symbolic reasoning tasks, when provided with a few examples at test time ("few-shot prompting"). Much of this success can be attributed to prompting methods such as "chain-of-thought'', which employ LLMs for both understanding the problem description by decomposing it into steps, as well as solving each step of the problem. While LLMs seem to be adept at this sort of step-by-step decomposition, LLMs often make logical and arithmetic mistakes in the solution part, even when the problem is decomposed correctly. In this paper, we present Program-Aided Language models (PAL): a novel approach that uses the LLM to read natural language problems and generate programs as the intermediate reasoning steps, but offloads the solution step to a runtime such as a Python interpreter. With PAL, decomposing the natural language problem into runnable steps remains the only learning task for the LLM, while solving is delegated to the interpreter. We demonstrate this synergy between a neural LLM and a symbolic interpreter across 13 mathematical, symbolic, and algorithmic reasoning tasks from BIG-Bench Hard and other benchmarks. In all these natural language reasoning tasks, generating code using an LLM and reasoning using a Python interpreter leads to more accurate results than much larger models. For example, PAL using Codex achieves state-of-the-art few-shot accuracy on the GSM8K benchmark of math word problems, surpassing PaLM-540B which uses chain-of-thought by absolute 15% top-1. Our code and data are publicly available at [http://reasonwithpal.com/](http://reasonwithpal.com/) . 
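The experimental `PALChain` referenced above implements this generate-then-execute pattern. A short sketch follows; note that recent `langchain_experimental` releases require explicitly opting in to code execution (the `allow_dangerous_code` flag below is such an opt-in and may not exist in older versions), and the completion model is a placeholder.

```python
# PAL: the LLM writes a Python program for the word problem; the program, not the
# LLM, produces the final answer when it is executed.
from langchain_experimental.pal_chain import PALChain
from langchain_openai import OpenAI

llm = OpenAI(temperature=0)  # placeholder completion model
pal_chain = PALChain.from_math_prompt(llm, verbose=True, allow_dangerous_code=True)

question = (
    "Jan has three times the number of pets as Marcia. "
    "Marcia has two more pets than Cindy. "
    "If Cindy has four pets, how many total pets do the three have?"
)
print(pal_chain.invoke({"question": question}))
```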
ReAct: Synergizing Reasoning and Acting in Language Models[​](#react-synergizing-reasoning-and-acting-in-language-models "Direct link to ReAct: Synergizing Reasoning and Acting in Language Models") ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- * **arXiv id:** 2210.03629v3 * **Title:** ReAct: Synergizing Reasoning and Acting in Language Models * **Authors:** Shunyu Yao, Jeffrey Zhao, Dian Yu, et al. * **Published Date:** 2022-10-06 * **URL:** [http://arxiv.org/abs/2210.03629v3](http://arxiv.org/abs/2210.03629v3) * **LangChain:** * **Documentation:** [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface), [docs/integrations/tools/ionic\_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping) * **API Reference:** [langchain...create\_react\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain) **Abstract:** While large language models (LLMs) have demonstrated impressive capabilities across tasks in language understanding and interactive decision making, their abilities for reasoning (e.g. chain-of-thought prompting) and acting (e.g. action plan generation) have primarily been studied as separate topics. In this paper, we explore the use of LLMs to generate both reasoning traces and task-specific actions in an interleaved manner, allowing for greater synergy between the two: reasoning traces help the model induce, track, and update action plans as well as handle exceptions, while actions allow it to interface with external sources, such as knowledge bases or environments, to gather additional information. We apply our approach, named ReAct, to a diverse set of language and decision making tasks and demonstrate its effectiveness over state-of-the-art baselines, as well as improved human interpretability and trustworthiness over methods without reasoning or acting components. Concretely, on question answering (HotpotQA) and fact verification (Fever), ReAct overcomes issues of hallucination and error propagation prevalent in chain-of-thought reasoning by interacting with a simple Wikipedia API, and generates human-like task-solving trajectories that are more interpretable than baselines without reasoning traces. On two interactive decision making benchmarks (ALFWorld and WebShop), ReAct outperforms imitation and reinforcement learning methods by an absolute success rate of 34% and 10% respectively, while being prompted with only one or two in-context examples. 
Project site with code: [https://react-lm.github.io](https://react-lm.github.io) Deep Lake: a Lakehouse for Deep Learning[​](#deep-lake-a-lakehouse-for-deep-learning "Direct link to Deep Lake: a Lakehouse for Deep Learning") ----------------------------------------------------------------------------------------------------------------------------------------------- * **arXiv id:** 2209.10785v2 * **Title:** Deep Lake: a Lakehouse for Deep Learning * **Authors:** Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. * **Published Date:** 2022-09-22 * **URL:** [http://arxiv.org/abs/2209.10785v2](http://arxiv.org/abs/2209.10785v2) * **LangChain:** * **Documentation:** [docs/integrations/providers/activeloop\_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake) **Abstract:** Traditional data lakes provide critical data infrastructure for analytical workloads by enabling time travel, running SQL queries, ingesting data with ACID transactions, and visualizing petabyte-scale datasets on cloud storage. They allow organizations to break down data silos, unlock data-driven decision-making, improve operational efficiency, and reduce costs. However, as deep learning usage increases, traditional data lakes are not well-designed for applications such as natural language processing (NLP), audio processing, computer vision, and applications involving non-tabular datasets. This paper presents Deep Lake, an open-source lakehouse for deep learning applications developed at Activeloop. Deep Lake maintains the benefits of a vanilla data lake with one key difference: it stores complex data, such as images, videos, annotations, as well as tabular data, in the form of tensors and rapidly streams the data over the network to (a) Tensor Query Language, (b) in-browser visualization engine, or (c) deep learning frameworks without sacrificing GPU utilization. Datasets stored in Deep Lake can be accessed from PyTorch, TensorFlow, JAX, and integrate with numerous MLOps tools. Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages[​](#bitext-mining-using-distilled-sentence-representations-for-low-resource-languages "Direct link to Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages") --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- * **arXiv id:** 2205.12654v1 * **Title:** Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages * **Authors:** Kevin Heffernan, Onur Çelebi, Holger Schwenk * **Published Date:** 2022-05-25 * **URL:** [http://arxiv.org/abs/2205.12654v1](http://arxiv.org/abs/2205.12654v1) * **LangChain:** * **API Reference:** [langchain\_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings) **Abstract:** Scaling multilingual representation learning beyond the hundred most frequent languages is challenging, in particular to cover the long tail of low-resource languages. A promising approach has been to train one-for-all multilingual models capable of cross-lingual transfer, but these models often suffer from insufficient capacity and interference between unrelated languages. 
Instead, we move away from this approach and focus on training multiple language (family) specific representations, but most prominently enable all languages to still be encoded in the same representational space. To achieve this, we focus on teacher-student training, allowing all encoders to be mutually compatible for bitext mining, and enabling fast learning of new languages. We introduce a new teacher-student training scheme which combines supervised and self-supervised training, allowing encoders to take advantage of monolingual training data, which is valuable in the low-resource setting. Our approach significantly outperforms the original LASER encoder. We study very low-resource languages and handle 50 African languages, many of which are not covered by any other model. For these languages, we train sentence encoders, mine bitexts, and validate the bitexts by training NMT systems. Evaluating the Text-to-SQL Capabilities of Large Language Models[​](#evaluating-the-text-to-sql-capabilities-of-large-language-models "Direct link to Evaluating the Text-to-SQL Capabilities of Large Language Models") ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ * **arXiv id:** 2204.00498v1 * **Title:** Evaluating the Text-to-SQL Capabilities of Large Language Models * **Authors:** Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau * **Published Date:** 2022-03-15 * **URL:** [http://arxiv.org/abs/2204.00498v1](http://arxiv.org/abs/2204.00498v1) * **LangChain:** * **API Reference:** [langchain\_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain\_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase) **Abstract:** We perform an empirical evaluation of Text-to-SQL capabilities of the Codex language model. We find that, without any finetuning, Codex is a strong baseline on the Spider benchmark; we also analyze the failure modes of Codex in this setting. Furthermore, we demonstrate on the GeoQuery and Scholar benchmarks that a small number of in-domain examples provided in the prompt enables Codex to perform better than state-of-the-art models finetuned on such few-shot examples. Locally Typical Sampling[​](#locally-typical-sampling "Direct link to Locally Typical Sampling") ------------------------------------------------------------------------------------------------ * **arXiv id:** 2202.00666v5 * **Title:** Locally Typical Sampling * **Authors:** Clara Meister, Tiago Pimentel, Gian Wiher, et al. 
* **Published Date:** 2022-02-01 * **URL:** [http://arxiv.org/abs/2202.00666v5](http://arxiv.org/abs/2202.00666v5) * **LangChain:** * **API Reference:** [langchain\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference) **Abstract:** Today's probabilistic language generators fall short when it comes to producing coherent and fluent text despite the fact that the underlying models perform well under standard metrics, e.g., perplexity. This discrepancy has puzzled the language generation community for the last few years. In this work, we posit that the abstraction of natural language generation as a discrete stochastic process--which allows for an information-theoretic analysis--can provide new insights into the behavior of probabilistic language generators, e.g., why high-probability texts can be dull or repetitive. Humans use language as a means of communicating information, aiming to do so in a simultaneously efficient and error-minimizing manner; in fact, psycholinguistics research suggests humans choose each word in a string with this subconscious goal in mind. We formally define the set of strings that meet this criterion: those for which each word has an information content close to the expected information content, i.e., the conditional entropy of our model. We then propose a simple and efficient procedure for enforcing this criterion when generating from probabilistic models, which we call locally typical sampling. Automatic and human evaluations show that, in comparison to nucleus and top-k sampling, locally typical sampling offers competitive performance (in both abstractive summarization and story generation) in terms of quality while consistently reducing degenerate repetitions. Learning Transferable Visual Models From Natural Language Supervision[​](#learning-transferable-visual-models-from-natural-language-supervision "Direct link to Learning Transferable Visual Models From Natural Language Supervision") --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- * **arXiv id:** 2103.00020v1 * **Title:** Learning Transferable Visual Models From Natural Language Supervision * **Authors:** Alec Radford, Jong Wook Kim, Chris Hallacy, et al. * **Published Date:** 2021-02-26 * **URL:** [http://arxiv.org/abs/2103.00020v1](http://arxiv.org/abs/2103.00020v1) * **LangChain:** * **API Reference:** [langchain\_experimental.open\_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip) **Abstract:** State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. 
This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at [https://github.com/OpenAI/CLIP](https://github.com/OpenAI/CLIP). CTRL: A Conditional Transformer Language Model for Controllable Generation[​](#ctrl-a-conditional-transformer-language-model-for-controllable-generation "Direct link to CTRL: A Conditional Transformer Language Model for Controllable Generation") ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- * **arXiv id:** 1909.05858v2 * **Title:** CTRL: A Conditional Transformer Language Model for Controllable Generation * **Authors:** Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. * **Published Date:** 2019-09-11 * **URL:** [http://arxiv.org/abs/1909.05858v2](http://arxiv.org/abs/1909.05858v2) * **LangChain:** * **API Reference:** [langchain\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference) **Abstract:** Large-scale language models show promising text generation capabilities, but users cannot easily control particular aspects of the generated text. We release CTRL, a 1.63 billion-parameter conditional transformer language model, trained to condition on control codes that govern style, content, and task-specific behavior. Control codes were derived from structure that naturally co-occurs with raw text, preserving the advantages of unsupervised learning while providing more explicit control over text generation. 
These codes also allow CTRL to predict which parts of the training data are most likely given a sequence. This provides a potential method for analyzing large amounts of data via model-based source attribution. We have released multiple full-sized, pretrained versions of CTRL at [https://github.com/salesforce/ctrl](https://github.com/salesforce/ctrl). Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks[​](#sentence-bert-sentence-embeddings-using-siamese-bert-networks "Direct link to Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks") ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- * **arXiv id:** 1908.10084v1 * **Title:** Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks * **Authors:** Nils Reimers, Iryna Gurevych * **Published Date:** 2019-08-27 * **URL:** [http://arxiv.org/abs/1908.10084v1](http://arxiv.org/abs/1908.10084v1) * **LangChain:** * **Documentation:** [docs/integrations/text\_embedding/sentence\_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers) **Abstract:** BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods. [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/additional_resources/arxiv_references.mdx) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). 
https://python.langchain.com/v0.2/docs/how_to/merge_message_runs/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to merge consecutive messages of the same type On this page How to merge consecutive messages of the same type ================================================== Certain models do not support passing in consecutive messages of the same type (a.k.a. "runs" of the same message type). The `merge_message_runs` utility makes it easy to merge consecutive messages of the same type. Basic usage[​](#basic-usage "Direct link to Basic usage") --------------------------------------------------------- from langchain_core.messages import ( AIMessage, HumanMessage, SystemMessage, merge_message_runs,)messages = [ SystemMessage("you're a good assistant."), SystemMessage("you always respond with a joke."), HumanMessage([{"type": "text", "text": "i wonder why it's called langchain"}]), HumanMessage("and who is harrison chasing anyways"), AIMessage( 'Well, I guess they thought "WordRope" and "SentenceString" just didn\'t have the same ring to it!' ), AIMessage("Why, he's probably chasing after the last cup of coffee in the office!"),]merged = merge_message_runs(messages)print("\n\n".join([repr(x) for x in merged])) **API Reference:**[AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) | [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [SystemMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html) | [merge\_message\_runs](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.merge_message_runs.html) SystemMessage(content="you're a good assistant.\nyou always respond with a joke.")HumanMessage(content=[{'type': 'text', 'text': "i wonder why it's called langchain"}, 'and who is harrison chasing anyways'])AIMessage(content='Well, I guess they thought "WordRope" and "SentenceString" just didn\'t have the same ring to it!\nWhy, he\'s probably chasing after the last cup of coffee in the office!') Notice that if the contents of one of the messages to merge is a list of content blocks then the merged message will have a list of content blocks. And if both messages to merge have string contents then those are concatenated with a newline character. Chaining[​](#chaining "Direct link to Chaining") ------------------------------------------------ `merge_message_runs` can be used in an imperatively (like above) or declaratively, making it easy to compose with other components in a chain: # pip install -U langchain-anthropicfrom langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model="claude-3-sonnet-20240229", temperature=0)# Notice we don't pass in messages. 
This creates# a RunnableLambda that takes messages as inputmerger = merge_message_runs()chain = merger | llmchain.invoke(messages) **API Reference:**[ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) AIMessage(content=[], response_metadata={'id': 'msg_01D6R8Naum57q8qBau9vLBUX', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 84, 'output_tokens': 3}}, id='run-ac0c465b-b54f-4b8b-9295-e5951250d653-0', usage_metadata={'input_tokens': 84, 'output_tokens': 3, 'total_tokens': 87}) Looking at the LangSmith trace we can see that before the messages are passed to the model they are merged: [https://smith.langchain.com/public/ab558677-cac9-4c59-9066-1ecce5bcd87c/r](https://smith.langchain.com/public/ab558677-cac9-4c59-9066-1ecce5bcd87c/r) Looking at just the merger, we can see that it's a Runnable object that can be invoked like all Runnables: merger.invoke(messages) [SystemMessage(content="you're a good assistant.\nyou always respond with a joke."), HumanMessage(content=[{'type': 'text', 'text': "i wonder why it's called langchain"}, 'and who is harrison chasing anyways']), AIMessage(content='Well, I guess they thought "WordRope" and "SentenceString" just didn\'t have the same ring to it!\nWhy, he\'s probably chasing after the last cup of coffee in the office!')] API reference[​](#api-reference "Direct link to API reference") --------------------------------------------------------------- For a complete description of all arguments head to the API reference: [https://api.python.langchain.com/en/latest/messages/langchain\_core.messages.utils.merge\_message\_runs.html](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.merge_message_runs.html) [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/merge_message_runs.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to split Markdown by Headers ](/v0.2/docs/how_to/markdown_header_metadata_splitter/)[ Next How to add message history ](/v0.2/docs/how_to/message_history/) * [Basic usage](#basic-usage) * [Chaining](#chaining) * [API reference](#api-reference)
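As a self-contained recap of the merging rules described in the Basic usage section above, here is a minimal sketch; the message texts are purely illustrative, and the import matches the one used earlier on this page:

```python
from langchain_core.messages import AIMessage, SystemMessage, merge_message_runs

# Two consecutive SystemMessages with string contents form a "run" and are
# merged into a single SystemMessage, with the strings joined by a newline.
msgs = [
    SystemMessage("You are terse."),
    SystemMessage("Answer in one sentence."),
    AIMessage("Understood."),
]

merged = merge_message_runs(msgs)
assert len(merged) == 2           # the two SystemMessages collapse into one
print(merged[0].content)          # -> "You are terse.\nAnswer in one sentence."
```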
https://python.langchain.com/v0.2/docs/how_to/long_context_reorder/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to reorder retrieved results to mitigate the "lost in the middle" effect How to reorder retrieved results to mitigate the "lost in the middle" effect ============================================================================ Substantial performance degradations in [RAG](/v0.2/docs/tutorials/rag/) applications have been [documented](https://arxiv.org/abs/2307.03172) as the number of retrieved documents grows (e.g., beyond ten). In brief: models are liable to miss relevant information in the middle of long contexts. By contrast, queries against vector stores will typically return documents in descending order of relevance (e.g., as measured by cosine similarity of [embeddings](/v0.2/docs/concepts/#embedding-models)). To mitigate the ["lost in the middle"](https://arxiv.org/abs/2307.03172) effect, you can re-order documents after retrieval such that the most relevant documents are positioned at extrema (e.g., the first and last pieces of context), and the least relevant documents are positioned in the middle. In some cases this can help surface the most relevant information to LLMs. The [LongContextReorder](https://api.python.langchain.com/en/latest/document_transformers/langchain_community.document_transformers.long_context_reorder.LongContextReorder.html) document transformer implements this re-ordering procedure. Below we demonstrate an example. %pip install --upgrade --quiet sentence-transformers langchain-chroma langchain langchain-openai langchain-huggingface > /dev/null First we embed some artificial documents and index them in an (in-memory) [Chroma](/v0.2/docs/integrations/providers/chroma/) vector store. We will use [Hugging Face](/v0.2/docs/integrations/text_embedding/huggingfacehub/) embeddings, but any LangChain vector store or embeddings model will suffice. from langchain_chroma import Chromafrom langchain_huggingface import HuggingFaceEmbeddings# Get embeddings.embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")texts = [ "Basquetball is a great sport.", "Fly me to the moon is one of my favourite songs.", "The Celtics are my favourite team.", "This is a document about the Boston Celtics", "I simply love going to the movies", "The Boston Celtics won the game by 20 points", "This is just a random text.", "Elden Ring is one of the best games in the last 15 years.", "L. Kornet is one of the best Celtics players.", "Larry Bird was an iconic NBA player.",]# Create a retrieverretriever = Chroma.from_texts(texts, embedding=embeddings).as_retriever( search_kwargs={"k": 10})query = "What can you tell me about the Celtics?"# Get relevant documents ordered by relevance scoredocs = retriever.invoke(query)docs **API Reference:**[HuggingFaceEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_huggingface.embeddings.huggingface.HuggingFaceEmbeddings.html) [Document(page_content='This is a document about the Boston Celtics'), Document(page_content='The Celtics are my favourite team.'), Document(page_content='L. 
Kornet is one of the best Celtics players.'), Document(page_content='The Boston Celtics won the game by 20 points'), Document(page_content='Larry Bird was an iconic NBA player.'), Document(page_content='Elden Ring is one of the best games in the last 15 years.'), Document(page_content='Basquetball is a great sport.'), Document(page_content='I simply love going to the movies'), Document(page_content='Fly me to the moon is one of my favourite songs.'), Document(page_content='This is just a random text.')] Note that documents are returned in descending order of relevance to the query. The `LongContextReorder` document transformer will implement the re-ordering described above: from langchain_community.document_transformers import LongContextReorder# Reorder the documents:# Less relevant document will be at the middle of the list and more# relevant elements at beginning / end.reordering = LongContextReorder()reordered_docs = reordering.transform_documents(docs)# Confirm that the 4 relevant documents are at beginning and end.reordered_docs **API Reference:**[LongContextReorder](https://api.python.langchain.com/en/latest/document_transformers/langchain_community.document_transformers.long_context_reorder.LongContextReorder.html) [Document(page_content='The Celtics are my favourite team.'), Document(page_content='The Boston Celtics won the game by 20 points'), Document(page_content='Elden Ring is one of the best games in the last 15 years.'), Document(page_content='I simply love going to the movies'), Document(page_content='This is just a random text.'), Document(page_content='Fly me to the moon is one of my favourite songs.'), Document(page_content='Basquetball is a great sport.'), Document(page_content='Larry Bird was an iconic NBA player.'), Document(page_content='L. Kornet is one of the best Celtics players.'), Document(page_content='This is a document about the Boston Celtics')] Below, we show how to incorporate the re-ordered documents into a simple question-answering chain: from langchain.chains.combine_documents import create_stuff_documents_chainfrom langchain_core.prompts import PromptTemplatefrom langchain_openai import OpenAIllm = OpenAI()prompt_template = """Given these texts:-----{context}-----Please answer the following question:{query}"""prompt = PromptTemplate( template=prompt_template, input_variables=["context", "query"],)# Create and invoke the chain:chain = create_stuff_documents_chain(llm, prompt)response = chain.invoke({"context": reordered_docs, "query": query})print(response) **API Reference:**[create\_stuff\_documents\_chain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.stuff.create_stuff_documents_chain.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) | [OpenAI](https://api.python.langchain.com/en/latest/llms/langchain_openai.llms.base.OpenAI.html) The Celtics are a professional basketball team and one of the most iconic franchises in the NBA. They are highly regarded and have a large fan base. The team has had many successful seasons and is often considered one of the top teams in the league. They have a strong history and have produced many great players, such as Larry Bird and L. Kornet. The team is based in Boston and is often referred to as the Boston Celtics. [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/long_context_reorder.ipynb) * * * #### Was this page helpful? 
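To make the re-ordering pattern above concrete, here is a small, dependency-free sketch of the interleaving idea; it reproduces the ordering shown in the example output, but treat `LongContextReorder` itself as the reference implementation:

```python
from typing import List

def reorder_lost_in_the_middle(docs_by_relevance: List[str]) -> List[str]:
    """Place the most relevant items at the beginning and end of the list,
    pushing the least relevant ones toward the middle (illustrative sketch)."""
    reordered: List[str] = []
    for i, doc in enumerate(reversed(docs_by_relevance)):
        if i % 2 == 1:
            reordered.append(doc)      # every other item goes to the tail...
        else:
            reordered.insert(0, doc)   # ...the rest go to the head
    return reordered

# Documents listed from most to least relevant:
print(reorder_lost_in_the_middle(["d1", "d2", "d3", "d4", "d5", "d6"]))
# ['d2', 'd4', 'd6', 'd5', 'd3', 'd1'] -- d1/d2 at the extremes, d5/d6 in the middle
```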
https://python.langchain.com/v0.2/docs/tutorials/llm_chain/
* [](/v0.2/) * [Tutorials](/v0.2/docs/tutorials/) * Build a Simple LLM Application with LCEL On this page Build a Simple LLM Application with LCEL ======================================== In this quickstart we'll show you how to build a simple LLM application with LangChain. This application will translate text from English into another language. This is a relatively simple LLM application - it's just a single LLM call plus some prompting. Still, this is a great way to get started with LangChain - a lot of features can be built with just some prompting and an LLM call! After reading this tutorial, you'll have a high level overview of: * Using [language models](/v0.2/docs/concepts/#chat-models) * Using [PromptTemplates](/v0.2/docs/concepts/#prompt-templates) and [OutputParsers](/v0.2/docs/concepts/#output-parsers) * Using [LangChain Expression Language (LCEL)](/v0.2/docs/concepts/#langchain-expression-language-lcel) to chain components together * Debugging and tracing your application using [LangSmith](/v0.2/docs/concepts/#langsmith) * Deploying your application with [LangServe](/v0.2/docs/concepts/#langserve) Let's dive in! Setup[​](#setup "Direct link to Setup") --------------------------------------- ### Jupyter Notebook[​](#jupyter-notebook "Direct link to Jupyter Notebook") This guide (and most of the other guides in the documentation) uses [Jupyter notebooks](https://jupyter.org/) and assumes the reader is as well. Jupyter notebooks are perfect for learning how to work with LLM systems because oftentimes things can go wrong (unexpected output, API down, etc) and going through guides in an interactive environment is a great way to better understand them. This and other tutorials are perhaps most conveniently run in a Jupyter notebook. See [here](https://jupyter.org/install) for instructions on how to install. ### Installation[​](#installation "Direct link to Installation") To install LangChain run: * Pip * Conda pip install langchain conda install langchain -c conda-forge For more details, see our [Installation guide](/v0.2/docs/how_to/installation/). ### LangSmith[​](#langsmith "Direct link to LangSmith") Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with [LangSmith](https://smith.langchain.com). After you sign up at the link above, make sure to set your environment variables to start logging traces: export LANGCHAIN_TRACING_V2="true"export LANGCHAIN_API_KEY="..." Or, if in a notebook, you can set them with: import getpassimport osos.environ["LANGCHAIN_TRACING_V2"] = "true"os.environ["LANGCHAIN_API_KEY"] = getpass.getpass() Using Language Models[​](#using-language-models "Direct link to Using Language Models") --------------------------------------------------------------------------------------- First up, let's learn how to use a language model by itself. LangChain supports many different language models that you can use interchangably - select the one you want to use below! 
* OpenAI * Anthropic * Azure * Google * Cohere * FireworksAI * Groq * MistralAI * TogetherAI pip install -qU langchain-openai import getpassimport osos.environ["OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAImodel = ChatOpenAI(model="gpt-4") pip install -qU langchain-anthropic import getpassimport osos.environ["ANTHROPIC_API_KEY"] = getpass.getpass()from langchain_anthropic import ChatAnthropicmodel = ChatAnthropic(model="claude-3-sonnet-20240229") pip install -qU langchain-openai import getpassimport osos.environ["AZURE_OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import AzureChatOpenAImodel = AzureChatOpenAI( azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"], openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],) pip install -qU langchain-google-vertexai import getpassimport osos.environ["GOOGLE_API_KEY"] = getpass.getpass()from langchain_google_vertexai import ChatVertexAImodel = ChatVertexAI(model="gemini-pro") pip install -qU langchain-cohere import getpassimport osos.environ["COHERE_API_KEY"] = getpass.getpass()from langchain_cohere import ChatCoheremodel = ChatCohere(model="command-r") pip install -qU langchain-fireworks import getpassimport osos.environ["FIREWORKS_API_KEY"] = getpass.getpass()from langchain_fireworks import ChatFireworksmodel = ChatFireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct") pip install -qU langchain-groq import getpassimport osos.environ["GROQ_API_KEY"] = getpass.getpass()from langchain_groq import ChatGroqmodel = ChatGroq(model="llama3-8b-8192") pip install -qU langchain-mistralai import getpassimport osos.environ["MISTRAL_API_KEY"] = getpass.getpass()from langchain_mistralai import ChatMistralAImodel = ChatMistralAI(model="mistral-large-latest") pip install -qU langchain-openai import getpassimport osos.environ["TOGETHER_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAImodel = ChatOpenAI( base_url="https://api.together.xyz/v1", api_key=os.environ["TOGETHER_API_KEY"], model="mistralai/Mixtral-8x7B-Instruct-v0.1",) Let's first use the model directly. `ChatModel`s are instances of LangChain "Runnables", which means they expose a standard interface for interacting with them. To just simply call the model, we can pass in a list of messages to the `.invoke` method. from langchain_core.messages import HumanMessage, SystemMessagemessages = [ SystemMessage(content="Translate the following from English into Italian"), HumanMessage(content="hi!"),]model.invoke(messages) **API Reference:**[HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [SystemMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html) AIMessage(content='ciao!', response_metadata={'token_usage': {'completion_tokens': 3, 'prompt_tokens': 20, 'total_tokens': 23}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-fc5d7c88-9615-48ab-a3c7-425232b562c5-0') If we've enable LangSmith, we can see that this run is logged to LangSmith, and can see the [LangSmith trace](https://smith.langchain.com/public/88baa0b2-7c1a-4d09-ba30-a47985dde2ea/r) OutputParsers[​](#outputparsers "Direct link to OutputParsers") --------------------------------------------------------------- Notice that the response from the model is an `AIMessage`. 
This contains a string response along with other metadata about the response. Oftentimes we may just want to work with the string response. We can parse out just this response by using a simple output parser. We first import the simple output parser. from langchain_core.output_parsers import StrOutputParserparser = StrOutputParser() **API Reference:**[StrOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.string.StrOutputParser.html) One way to use it is to use it by itself. For example, we could save the result of the language model call and then pass it to the parser. result = model.invoke(messages) parser.invoke(result) 'Ciao!' More commonly, we can "chain" the model with this output parser. This means this output parser will get called every time in this chain. This chain takes on the input type of the language model (string or list of message) and returns the output type of the output parser (string). We can easily create the chain using the `|` operator. The `|` operator is used in LangChain to combine two elements together. chain = model | parser chain.invoke(messages) 'Ciao!' If we now look at LangSmith, we can see that the chain has two steps: first the language model is called, then the result of that is passed to the output parser. We can see the [LangSmith trace](https://smith.langchain.com/public/f1bdf656-2739-42f7-ac7f-0f1dd712322f/r) Prompt Templates[​](#prompt-templates "Direct link to Prompt Templates") ------------------------------------------------------------------------ Right now we are passing a list of messages directly into the language model. Where does this list of messages come from? Usually, it is constructed from a combination of user input and application logic. This application logic usually takes the raw user input and transforms it into a list of messages ready to pass to the language model. Common transformations include adding a system message or formatting a template with the user input. PromptTemplates are a concept in LangChain designed to assist with this transformation. They take in raw user input and return data (a prompt) that is ready to pass into a language model. Let's create a PromptTemplate here. It will take in two user variables: * `language`: The language to translate text into * `text`: The text to translate from langchain_core.prompts import ChatPromptTemplate **API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) First, let's create a string that we will format to be the system message: system_template = "Translate the following into {language}:" Next, we can create the PromptTemplate. This will be a combination of the `system_template` as well as a simpler template for where the put the text prompt_template = ChatPromptTemplate.from_messages( [("system", system_template), ("user", "{text}")]) The input to this prompt template is a dictionary. We can play around with this prompt template by itself to see what it does by itself result = prompt_template.invoke({"language": "italian", "text": "hi"})result ChatPromptValue(messages=[SystemMessage(content='Translate the following into italian:'), HumanMessage(content='hi')]) We can see that it returns a `ChatPromptValue` that consists of two messages. 
If we want to access the messages directly we do: result.to_messages() [SystemMessage(content='Translate the following into italian:'), HumanMessage(content='hi')] Chaining together components with LCEL[​](#chaining-together-components-with-lcel "Direct link to Chaining together components with LCEL") ------------------------------------------------------------------------------------------------------------------------------------------ We can now combine this with the model and the output parser from above using the pipe (`|`) operator: chain = prompt_template | model | parser chain.invoke({"language": "italian", "text": "hi"}) 'ciao' This is a simple example of using [LangChain Expression Language (LCEL)](/v0.2/docs/concepts/#langchain-expression-language-lcel) to chain together LangChain modules. There are several benefits to this approach, including optimized streaming and tracing support. If we take a look at the LangSmith trace, we can see all three components show up in the [LangSmith trace](https://smith.langchain.com/public/bc49bec0-6b13-4726-967f-dbd3448b786d/r). Serving with LangServe[​](#serving-with-langserve "Direct link to Serving with LangServe") ------------------------------------------------------------------------------------------ Now that we've built an application, we need to serve it. That's where LangServe comes in. LangServe helps developers deploy LangChain chains as a REST API. You do not need to use LangServe to use LangChain, but in this guide we'll show how you can deploy your app with LangServe. While the first part of this guide was intended to be run in a Jupyter Notebook or script, we will now move out of that. We will be creating a Python file and then interacting with it from the command line. Install with: pip install "langserve[all]" ### Server[​](#server "Direct link to Server") To create a server for our application we'll make a `serve.py` file. This will contain our logic for serving our application. It consists of three things: 1. The definition of our chain that we just built above 2. Our FastAPI app 3. A definition of a route from which to serve the chain, which is done with `langserve.add_routes` #!/usr/bin/env pythonfrom typing import Listfrom fastapi import FastAPIfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.output_parsers import StrOutputParserfrom langchain_openai import ChatOpenAIfrom langserve import add_routes# 1. Create prompt templatesystem_template = "Translate the following into {language}:"prompt_template = ChatPromptTemplate.from_messages([ ('system', system_template), ('user', '{text}')])# 2. Create modelmodel = ChatOpenAI()# 3. Create parserparser = StrOutputParser()# 4. Create chainchain = prompt_template | model | parser# 4. App definitionapp = FastAPI( title="LangChain Server", version="1.0", description="A simple API server using LangChain's Runnable interfaces",)# 5. Adding chain routeadd_routes( app, chain, path="/chain",)if __name__ == "__main__": import uvicorn uvicorn.run(app, host="localhost", port=8000) **API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [StrOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.string.StrOutputParser.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) And that's it! 
If we execute this file: python serve.py we should see our chain being served at [http://localhost:8000](http://localhost:8000). ### Playground[​](#playground "Direct link to Playground") Every LangServe service comes with a simple [built-in UI](https://github.com/langchain-ai/langserve/blob/main/README.md#playground) for configuring and invoking the application with streaming output and visibility into intermediate steps. Head to [http://localhost:8000/chain/playground/](http://localhost:8000/chain/playground/) to try it out! Pass in the same inputs as before - `{"language": "italian", "text": "hi"}` - and it should respond same as before. ### Client[​](#client "Direct link to Client") Now let's set up a client for programmatically interacting with our service. We can easily do this with the `[langserve.RemoteRunnable](/docs/langserve/#client)`. Using this, we can interact with the served chain as if it were running client-side. from langserve import RemoteRunnableremote_chain = RemoteRunnable("http://localhost:8000/chain/")remote_chain.invoke({"language": "italian", "text": "hi"}) 'Ciao' To learn more about the many other features of LangServe [head here](/v0.2/docs/langserve/). Conclusion[​](#conclusion "Direct link to Conclusion") ------------------------------------------------------ That's it! In this tutorial you've learned how to create your first simple LLM application. You've learned how to work with language models, how to parse their outputs, how to create a prompt template, chaining them with LCEL, how to get great observability into chains you create with LangSmith, and how to deploy them with LangServe. This just scratches the surface of what you will want to learn to become a proficient AI Engineer. Luckily - we've got a lot of other resources! For further reading on the core concepts of LangChain, we've got detailed [Conceptual Guides](/v0.2/docs/concepts/). If you have more specific questions on these concepts, check out the following sections of the how-to guides: * [LangChain Expression Language (LCEL)](/v0.2/docs/how_to/#langchain-expression-language-lcel) * [Prompt templates](/v0.2/docs/how_to/#prompt-templates) * [Chat models](/v0.2/docs/how_to/#chat-models) * [Output parsers](/v0.2/docs/how_to/#output-parsers) * [LangServe](/v0.2/docs/langserve/) And the LangSmith docs: * [LangSmith](https://docs.smith.langchain.com) [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/tutorials/llm_chain.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous Tutorials ](/v0.2/docs/tutorials/)[ Next Build a Query Analysis System ](/v0.2/docs/tutorials/query_analysis/) * [Setup](#setup) * [Jupyter Notebook](#jupyter-notebook) * [Installation](#installation) * [LangSmith](#langsmith) * [Using Language Models](#using-language-models) * [OutputParsers](#outputparsers) * [Prompt Templates](#prompt-templates) * [Chaining together components with LCEL](#chaining-together-components-with-lcel) * [Serving with LangServe](#serving-with-langserve) * [Server](#server) * [Playground](#playground) * [Client](#client) * [Conclusion](#conclusion)
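One benefit called out in the LCEL section above is streaming support. As a brief sketch (reusing the `chain = prompt_template | model | parser` object built earlier in this tutorial), the standard Runnable `.stream()` method yields output incrementally:

```python
# Stream the translation instead of waiting for the full string.
# Because the final step of the chain is StrOutputParser, each chunk
# is a string fragment.
for chunk in chain.stream({"language": "italian", "text": "hi"}):
    print(chunk, end="", flush=True)
```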
https://python.langchain.com/v0.2/docs/additional_resources/youtube/
On this page YouTube videos ============== \[Updated 2024-05-16\] ### [Official LangChain YouTube channel](https://www.youtube.com/@LangChain)[​](#official-langchain-youtube-channel "Direct link to official-langchain-youtube-channel") ### [Tutorials on YouTube](/v0.2/docs/additional_resources/tutorials/#tutorials)[​](#tutorials-on-youtube "Direct link to tutorials-on-youtube") Videos (sorted by views)[​](#videos-sorted-by-views "Direct link to Videos (sorted by views)") ---------------------------------------------------------------------------------------------- Only videos with 40K+ views: * [Using `ChatGPT` with YOUR OWN Data. This is magical. (LangChain `OpenAI API`)](https://youtu.be/9AXP7tCI9PI) * [Chat with Multiple `PDFs` | LangChain App Tutorial in Python (Free LLMs and Embeddings)](https://youtu.be/dXxQ0LR-3Hg?si=pjXKhsHRzn10vOqX) * [`Hugging Face` + Langchain in 5 mins | Access 200k+ FREE AI models for your AI apps](https://youtu.be/_j7JEDWuqLE?si=psimQscN3qo2dOa9) * [LangChain Crash Course For Beginners | LangChain Tutorial](https://youtu.be/nAmC7SoVLd8?si=qJdvyG5-rnjqfdj1) * [Vector Embeddings Tutorial – Code Your Own AI Assistant with GPT-4 API + LangChain + NLP](https://youtu.be/yfHHvmaMkcA?si=UBP3yw50cLm3a2nj) * [Development with Large Language Models Tutorial – `OpenAI`, Langchain, Agents, `Chroma`](https://youtu.be/xZDB1naRUlk?si=v8J1q6oFHRyTkf7Y) * [Langchain: `PDF` Chat App (GUI) | ChatGPT for Your PDF FILES | Step-by-Step Tutorial](https://youtu.be/RIWbalZ7sTo?si=LbKsCcuyv0BtnrTY) * [Vector Search `RAG` Tutorial – Combine Your Data with LLMs with Advanced Search](https://youtu.be/JEBDfGqrAUA?si=pD7oxpfwWeJCxfBt) * [LangChain Crash Course for Beginners](https://youtu.be/lG7Uxts9SXs?si=Yte4S5afN7KNCw0F) * [Learn `RAG` From Scratch – Python AI Tutorial from a LangChain Engineer](https://youtu.be/sVcwVQRHIc8?si=_LN4g0vOgSdtlB3S) * [`Llama 2` in LangChain — FIRST Open Source Conversational Agent!](https://youtu.be/6iHVJyX2e50?si=rtq1maPrzWKHbwVV) * [LangChain Tutorial for Beginners | Generative AI Series](https://youtu.be/cQUUkZnyoD0?si=KYz-bvcocdqGh9f_) * [Chatbots with `RAG`: LangChain Full Walkthrough](https://youtu.be/LhnCsygAvzY?si=yS7T98VLfcWdkDek) * [LangChain Explained In 15 Minutes - A MUST Learn For Python Programmers](https://youtu.be/mrjq3lFz23s?si=wkQGcSKUJjuiiEPf) * [LLM Project | End to End LLM Project Using Langchain, `OpenAI` in Finance Domain](https://youtu.be/MoqgmWV1fm8?si=oVl-5kJVgd3a07Y_) * [What is LangChain?](https://youtu.be/1bUy-1hGZpI?si=NZ0D51VM5y-DhjGe) * [`RAG` + Langchain Python Project: Easy AI/Chat For Your Doc](https://youtu.be/tcqEUSNCn8I?si=RLcWPBVLIErRqdmU) * [Getting Started With LangChain In 20 Minutes- Build Celebrity Search Application](https://youtu.be/_FpT1cwcSLg?si=X9qVazlXYucN_JBP) * [LangChain GEN AI Tutorial – 6 End-to-End Projects using OpenAI, Google `Gemini Pro`, `LLAMA2`](https://youtu.be/x0AnCE9SE4A?si=_92gJYm7kb-V2bi0) * [Complete Langchain GEN AI Crash Course With 6 End To End LLM Projects With OPENAI, `LLAMA2`, `Gemini Pro`](https://youtu.be/aWKrL4z5H6w?si=NVLi7Yiq0ccE7xXE) * [AI Leader Reveals The Future of AI AGENTS (LangChain CEO)](https://youtu.be/9ZhbA0FHZYc?si=1r4P6kRvKVvEhRgE) * [Learn How To Query Pdf using Langchain Open AI in 5 min](https://youtu.be/5Ghv-F1wF_0?si=ZZRjrWfeiFOVrcvu) * [Reliable, fully local RAG agents with `LLaMA3`](https://youtu.be/-ROS6gfYIts?si=75CXA8W_BbnkIxcV) * [Learn `LangChain.js` - Build LLM apps with JavaScript and `OpenAI`](https://youtu.be/HSZ_uaif57o?si=Icj-RAhwMT-vHaYA) * [LLM 
Project | End to End LLM Project Using LangChain, Google Palm In Ed-Tech Industry](https://youtu.be/AjQPRomyd-k?si=eC3NT6kn02Lhpz-_) * [Chatbot Answering from Your Own Knowledge Base: Langchain, `ChatGPT`, `Pinecone`, and `Streamlit`: | Code](https://youtu.be/nAKhxQ3hcMA?si=9Zd_Nd_jiYhtml5w) * [LangChain is AMAZING | Quick Python Tutorial](https://youtu.be/I4mFqyqFkxg?si=aJ66qh558OfNAczD) * [`GirlfriendGPT` - AI girlfriend with LangChain](https://youtu.be/LiN3D1QZGQw?si=kZR-lnJwixeVrjmh) * [Using NEW `MPT-7B` in `Hugging Face` and LangChain](https://youtu.be/DXpk9K7DgMo?si=99JDpV_ueimwJhMi) * [LangChain - COMPLETE TUTORIAL - Basics to advanced concept!](https://youtu.be/a89vqgK-Qcs?si=0aVO2EOqsw7GE5e3) * [LangChain Agents: Simply Explained!](https://youtu.be/Xi9Ui-9qcPw?si=DCuG7nGx8dxcfhkx) * [Chat With Multiple `PDF` Documents With Langchain And Google `Gemini Pro`](https://youtu.be/uus5eLz6smA?si=YUwvHtaZsGeIl0WD) * [LLM Project | End to end LLM project Using Langchain, `Google Palm` in Retail Industry](https://youtu.be/4wtrl4hnPT8?si=_eOKPpdLfWu5UXMQ) * [Tutorial | Chat with any Website using Python and Langchain](https://youtu.be/bupx08ZgSFg?si=KRrjYZFnuLsstGwW) * [Prompt Engineering And LLM's With LangChain In One Shot-Generative AI](https://youtu.be/t2bSApmPzU4?si=87vPQQtYEWTyu2Kx) * [Build a Custom Chatbot with `OpenAI`: `GPT-Index` & LangChain | Step-by-Step Tutorial](https://youtu.be/FIDv6nc4CgU?si=gR1u3DUG9lvzBIKK) * [Search Your `PDF` App using Langchain, `ChromaDB`, and Open Source LLM: No OpenAI API (Runs on CPU)](https://youtu.be/rIV1EseKwU4?si=UxZEoXSiPai8fXgl) * [Building a `RAG` application from scratch using Python, LangChain, and the `OpenAI API`](https://youtu.be/BrsocJb-fAo?si=hvkh9iTGzJ-LnsX-) * [Function Calling via `ChatGPT API` - First Look With LangChain](https://youtu.be/0-zlUy7VUjg?si=Vc6LFseckEc6qvuk) * [Private GPT, free deployment! Langchain-Chachat helps you easily play with major mainstream AI models! | Zero Degree Commentary](https://youtu.be/3LLUyaHP-3I?si=AZumEeFXsvqaLl0f) * [Create a ChatGPT clone using `Streamlit` and LangChain](https://youtu.be/IaTiyQ2oYUQ?si=WbgsYmqPDnMidSUK) * [What's next for AI agents ft. LangChain's Harrison Chase](https://youtu.be/pBBe1pk8hf4?si=H4vdBF9nmkNZxiHt) * [`LangFlow`: Build Chatbots without Writing Code - LangChain](https://youtu.be/KJ-ux3hre4s?si=TJuDu4bAlva1myNL) * [Building a LangChain Custom Medical Agent with Memory](https://youtu.be/6UFtRwWnHws?si=wymYad26VgigRkHy) * [`Ollama` meets LangChain](https://youtu.be/k_1pOF1mj8k?si=RlBiCrmaR3s7SnMK) * [End To End LLM Langchain Project using `Pinecone` Vector Database](https://youtu.be/erUfLIi9OFM?si=aHpuHXdIEmAfS4eF) * [`LLaMA2` with LangChain - Basics | LangChain TUTORIAL](https://youtu.be/cIRzwSXB4Rc?si=FUs0OLVJpzKhut0h) * [Understanding `ReACT` with LangChain](https://youtu.be/Eug2clsLtFs?si=imgj534ggxlypS0d) * * * \[Updated 2024-05-16\] [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/additional_resources/youtube.mdx) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). * [Official LangChain YouTube channel](#official-langchain-youtube-channel) * [Tutorials on YouTube](#tutorials-on-youtube) * [Videos (sorted by views)](#videos-sorted-by-views)
https://python.langchain.com/v0.2/docs/how_to/markdown_header_metadata_splitter/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to split Markdown by Headers On this page How to split Markdown by Headers ================================ ### Motivation[​](#motivation "Direct link to Motivation") Many chat or Q+A applications involve chunking input documents prior to embedding and vector storage. [These notes](https://www.pinecone.io/learn/chunking-strategies/) from Pinecone provide some useful tips: When a full paragraph or document is embedded, the embedding process considers both the overall context and the relationships between the sentences and phrases within the text. This can result in a more comprehensive vector representation that captures the broader meaning and themes of the text. As mentioned, chunking often aims to keep text with common context together. With this in mind, we might want to specifically honor the structure of the document itself. For example, a markdown file is organized by headers. Creating chunks within specific header groups is an intuitive idea. To address this challenge, we can use [MarkdownHeaderTextSplitter](https://api.python.langchain.com/en/latest/markdown/langchain_text_splitters.markdown.MarkdownHeaderTextSplitter.html). This will split a markdown file by a specified set of headers. For example, if we want to split this markdown: md = '# Foo\n\n ## Bar\n\nHi this is Jim \nHi this is Joe\n\n ## Baz\n\n Hi this is Molly' We can specify the headers to split on: [("#", "Header 1"),("##", "Header 2")] And content is grouped or split by common headers: {'content': 'Hi this is Jim \nHi this is Joe', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Bar'}}{'content': 'Hi this is Molly', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Baz'}} Let's have a look at some examples below. ### Basic usage:[​](#basic-usage "Direct link to Basic usage:") %pip install -qU langchain-text-splitters from langchain_text_splitters import MarkdownHeaderTextSplitter **API Reference:**[MarkdownHeaderTextSplitter](https://api.python.langchain.com/en/latest/markdown/langchain_text_splitters.markdown.MarkdownHeaderTextSplitter.html) markdown_document = "# Foo\n\n ## Bar\n\nHi this is Jim\n\nHi this is Joe\n\n ### Boo \n\n Hi this is Lance \n\n ## Baz\n\n Hi this is Molly"headers_to_split_on = [ ("#", "Header 1"), ("##", "Header 2"), ("###", "Header 3"),]markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on)md_header_splits = markdown_splitter.split_text(markdown_document)md_header_splits [Document(page_content='Hi this is Jim \nHi this is Joe', metadata={'Header 1': 'Foo', 'Header 2': 'Bar'}), Document(page_content='Hi this is Lance', metadata={'Header 1': 'Foo', 'Header 2': 'Bar', 'Header 3': 'Boo'}), Document(page_content='Hi this is Molly', metadata={'Header 1': 'Foo', 'Header 2': 'Baz'})] type(md_header_splits[0]) langchain_core.documents.base.Document By default, `MarkdownHeaderTextSplitter` strips headers being split on from the output chunk's content. This can be disabled by setting `strip_headers = False`. 
markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on, strip_headers=False)md_header_splits = markdown_splitter.split_text(markdown_document)md_header_splits [Document(page_content='# Foo \n## Bar \nHi this is Jim \nHi this is Joe', metadata={'Header 1': 'Foo', 'Header 2': 'Bar'}), Document(page_content='### Boo \nHi this is Lance', metadata={'Header 1': 'Foo', 'Header 2': 'Bar', 'Header 3': 'Boo'}), Document(page_content='## Baz \nHi this is Molly', metadata={'Header 1': 'Foo', 'Header 2': 'Baz'})] ### How to return Markdown lines as separate documents[​](#how-to-return-markdown-lines-as-separate-documents "Direct link to How to return Markdown lines as separate documents") By default, `MarkdownHeaderTextSplitter` aggregates lines based on the headers specified in `headers_to_split_on`. We can disable this by specifying `return_each_line`: markdown_splitter = MarkdownHeaderTextSplitter( headers_to_split_on, return_each_line=True,)md_header_splits = markdown_splitter.split_text(markdown_document)md_header_splits [Document(page_content='Hi this is Jim', metadata={'Header 1': 'Foo', 'Header 2': 'Bar'}), Document(page_content='Hi this is Joe', metadata={'Header 1': 'Foo', 'Header 2': 'Bar'}), Document(page_content='Hi this is Lance', metadata={'Header 1': 'Foo', 'Header 2': 'Bar', 'Header 3': 'Boo'}), Document(page_content='Hi this is Molly', metadata={'Header 1': 'Foo', 'Header 2': 'Baz'})] Note that here header information is retained in the `metadata` for each document. ### How to constrain chunk size:[​](#how-to-constrain-chunk-size "Direct link to How to constrain chunk size:") Within each markdown group we can then apply any text splitter we want, such as `RecursiveCharacterTextSplitter`, which allows for further control of the chunk size. markdown_document = "# Intro \n\n ## History \n\n Markdown[9] is a lightweight markup language for creating formatted text using a plain-text editor. John Gruber created Markdown in 2004 as a markup language that is appealing to human readers in its source code form.[9] \n\n Markdown is widely used in blogging, instant messaging, online forums, collaborative software, documentation pages, and readme files. \n\n ## Rise and divergence \n\n As Markdown popularity grew rapidly, many Markdown implementations appeared, driven mostly by the need for \n\n additional features such as tables, footnotes, definition lists,[note 1] and Markdown inside HTML blocks. \n\n #### Standardization \n\n From 2012, a group of people, including Jeff Atwood and John MacFarlane, launched what Atwood characterised as a standardisation effort. 
\n\n ## Implementations \n\n Implementations of Markdown are available for over a dozen programming languages."headers_to_split_on = [ ("#", "Header 1"), ("##", "Header 2"),]# MD splitsmarkdown_splitter = MarkdownHeaderTextSplitter( headers_to_split_on=headers_to_split_on, strip_headers=False)md_header_splits = markdown_splitter.split_text(markdown_document)# Char-level splitsfrom langchain_text_splitters import RecursiveCharacterTextSplitterchunk_size = 250chunk_overlap = 30text_splitter = RecursiveCharacterTextSplitter( chunk_size=chunk_size, chunk_overlap=chunk_overlap)# Splitsplits = text_splitter.split_documents(md_header_splits)splits **API Reference:**[RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html) [Document(page_content='# Intro \n## History \nMarkdown[9] is a lightweight markup language for creating formatted text using a plain-text editor. John Gruber created Markdown in 2004 as a markup language that is appealing to human readers in its source code form.[9]', metadata={'Header 1': 'Intro', 'Header 2': 'History'}), Document(page_content='Markdown is widely used in blogging, instant messaging, online forums, collaborative software, documentation pages, and readme files.', metadata={'Header 1': 'Intro', 'Header 2': 'History'}), Document(page_content='## Rise and divergence \nAs Markdown popularity grew rapidly, many Markdown implementations appeared, driven mostly by the need for \nadditional features such as tables, footnotes, definition lists,[note 1] and Markdown inside HTML blocks.', metadata={'Header 1': 'Intro', 'Header 2': 'Rise and divergence'}), Document(page_content='#### Standardization \nFrom 2012, a group of people, including Jeff Atwood and John MacFarlane, launched what Atwood characterised as a standardisation effort.', metadata={'Header 1': 'Intro', 'Header 2': 'Rise and divergence'}), Document(page_content='## Implementations \nImplementations of Markdown are available for over a dozen programming languages.', metadata={'Header 1': 'Intro', 'Header 2': 'Implementations'})] [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/markdown_header_metadata_splitter.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to reorder retrieved results to mitigate the "lost in the middle" effect ](/v0.2/docs/how_to/long_context_reorder/)[ Next How to merge consecutive messages of the same type ](/v0.2/docs/how_to/merge_message_runs/) * [Motivation](#motivation) * [Basic usage:](#basic-usage) * [How to return Markdown lines as separate documents](#how-to-return-markdown-lines-as-separate-documents) * [How to constrain chunk size:](#how-to-constrain-chunk-size)
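Because each split carries its header hierarchy in `metadata`, the chunks are easy to post-process. As a small illustrative sketch (using the `splits` list produced above), we can count how many chunks each level-2 section produced:

```python
from collections import defaultdict

# Group chunk texts by their "Header 2" metadata (the label configured in
# headers_to_split_on); chunks that appear before any level-2 header fall
# back to a placeholder key.
by_section = defaultdict(list)
for doc in splits:
    by_section[doc.metadata.get("Header 2", "(no Header 2)")].append(doc.page_content)

for section, chunks in by_section.items():
    print(f"{section}: {len(chunks)} chunk(s)")
```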
https://python.langchain.com/v0.2/docs/how_to/message_history/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to add message history On this page How to add message history ========================== Prerequisites This guide assumes familiarity with the following concepts: * [LangChain Expression Language (LCEL)](/v0.2/docs/concepts/#langchain-expression-language) * [Chaining runnables](/v0.2/docs/how_to/sequence/) * [Configuring chain parameters at runtime](/v0.2/docs/how_to/configure/) * [Prompt templates](/v0.2/docs/concepts/#prompt-templates) * [Chat Messages](/v0.2/docs/concepts/#message-types) Passing conversation state into and out a chain is vital when building a chatbot. The [`RunnableWithMessageHistory`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html#langchain_core.runnables.history.RunnableWithMessageHistory) class lets us add message history to certain types of chains. It wraps another Runnable and manages the chat message history for it. Specifically, it loads previous messages in the conversation BEFORE passing it to the Runnable, and it saves the generated response as a message AFTER calling the runnable. This class also enables multiple conversations by saving each conversation with a `session_id` - it then expects a `session_id` to be passed in the config when calling the runnable, and uses that to look up the relevant conversation history. ![index_diagram](/v0.2/assets/images/message_history-4c13b8b9363beb4621d605bf6b5a34b4.png) In practice this looks something like: from langchain_core.runnables.history import RunnableWithMessageHistorywith_message_history = RunnableWithMessageHistory( # The underlying runnable runnable, # A function that takes in a session id and returns a memory object get_session_history, # Other parameters that may be needed to align the inputs/outputs # of the Runnable with the memory object ... )with_message_history.invoke( # The same input as before {"ability": "math", "input": "What does cosine mean?"}, # Configuration specifying the `session_id`, # which controls which conversation to load config={"configurable": {"session_id": "abc123"}},) **API Reference:**[RunnableWithMessageHistory](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) In order to properly set this up there are two main things to consider: 1. How to store and load messages? (this is `get_session_history` in the example above) 2. What is the underlying Runnable you are wrapping and what are its inputs/outputs? (this is `runnable` in the example above, as well any additional parameters you pass to `RunnableWithMessageHistory` to align the inputs/outputs) Let's walk through these pieces (and more) below. How to store and load messages[​](#how-to-store-and-load-messages "Direct link to How to store and load messages") ------------------------------------------------------------------------------------------------------------------ A key part of this is storing and loading messages. When constructing `RunnableWithMessageHistory` you need to pass in a `get_session_history` function. This function should take in a `session_id` and return a `BaseChatMessageHistory` object. **What is `session_id`?** `session_id` is an identifier for the session (conversation) thread that these input messages correspond to. This allows you to maintain several conversations/threads with the same chain at the same time. 
**What is `BaseChatMessageHistory`?** `BaseChatMessageHistory` is a class that can load and save message objects. It will be called by `RunnableWithMessageHistory` to do exactly that. These classes are usually initialized with a session id. Let's create a `get_session_history` function to use for this example. To keep things simple, we will use a SQLite-backed chat message history (`SQLChatMessageHistory`); first, remove any existing database file: ! rm memory.db from langchain_community.chat_message_histories import SQLChatMessageHistory def get_session_history(session_id): return SQLChatMessageHistory(session_id, "sqlite:///memory.db") **API Reference:**[SQLChatMessageHistory](https://api.python.langchain.com/en/latest/chat_message_histories/langchain_community.chat_message_histories.sql.SQLChatMessageHistory.html) Check out the [memory integrations](https://integrations.langchain.com/memory) page for implementations of chat message histories using other providers (Redis, Postgres, etc.). What is the runnable you are trying to wrap?[​](#what-is-the-runnable-you-are-trying-to-wrap "Direct link to What is the runnable you are trying to wrap?") ----------------------------------------------------------------------------------------------------------------------------------------------------------- `RunnableWithMessageHistory` can only wrap certain types of Runnables. Specifically, it can be used for any Runnable that takes as input one of: * a sequence of [`BaseMessages`](/v0.2/docs/concepts/#message-types) * a dict with a key that takes a sequence of `BaseMessages` * a dict with a key that takes the latest message(s) as a string or sequence of `BaseMessages`, and a separate key that takes historical messages And returns as output one of: * a string that can be treated as the contents of an `AIMessage` * a sequence of `BaseMessage` * a dict with a key that contains a sequence of `BaseMessage` Let's take a look at some examples to see how it works.
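As an aside, the SQLite-backed `get_session_history` above can be swapped for a purely in-memory store when persistence across runs is not needed. A minimal sketch, assuming the `ChatMessageHistory` class from `langchain_community` (any `BaseChatMessageHistory` implementation works the same way):

```python
from langchain_community.chat_message_histories import ChatMessageHistory

# Keep one ChatMessageHistory per session_id; state is lost when the process exits.
store = {}

def get_session_history(session_id: str) -> ChatMessageHistory:
    if session_id not in store:
        store[session_id] = ChatMessageHistory()
    return store[session_id]
```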
### Setup[​](#setup "Direct link to Setup") First we construct a runnable (which here accepts a dict as input and returns a message as output): * OpenAI * Anthropic * Azure * Google * Cohere * FireworksAI * Groq * MistralAI * TogetherAI pip install -qU langchain-openai import getpassimport osos.environ["OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI(model="gpt-3.5-turbo-0125") pip install -qU langchain-anthropic import getpassimport osos.environ["ANTHROPIC_API_KEY"] = getpass.getpass()from langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model="claude-3-sonnet-20240229") pip install -qU langchain-openai import getpassimport osos.environ["AZURE_OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import AzureChatOpenAIllm = AzureChatOpenAI( azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"], openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],) pip install -qU langchain-google-vertexai import getpassimport osos.environ["GOOGLE_API_KEY"] = getpass.getpass()from langchain_google_vertexai import ChatVertexAIllm = ChatVertexAI(model="gemini-pro") pip install -qU langchain-cohere import getpassimport osos.environ["COHERE_API_KEY"] = getpass.getpass()from langchain_cohere import ChatCoherellm = ChatCohere(model="command-r") pip install -qU langchain-fireworks import getpassimport osos.environ["FIREWORKS_API_KEY"] = getpass.getpass()from langchain_fireworks import ChatFireworksllm = ChatFireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct") pip install -qU langchain-groq import getpassimport osos.environ["GROQ_API_KEY"] = getpass.getpass()from langchain_groq import ChatGroqllm = ChatGroq(model="llama3-8b-8192") pip install -qU langchain-mistralai import getpassimport osos.environ["MISTRAL_API_KEY"] = getpass.getpass()from langchain_mistralai import ChatMistralAIllm = ChatMistralAI(model="mistral-large-latest") pip install -qU langchain-openai import getpassimport osos.environ["TOGETHER_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI( base_url="https://api.together.xyz/v1", api_key=os.environ["TOGETHER_API_KEY"], model="mistralai/Mixtral-8x7B-Instruct-v0.1",) from langchain_core.messages import HumanMessagefrom langchain_core.runnables.history import RunnableWithMessageHistory **API Reference:**[HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [RunnableWithMessageHistory](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) ### Messages input, message(s) output[​](#messages-input-messages-output "Direct link to Messages input, message(s) output") The simplest form is just adding memory to a ChatModel. ChatModels accept a list of messages as input and output a message. This makes it very easy to use `RunnableWithMessageHistory` - no additional configuration is needed! runnable_with_history = RunnableWithMessageHistory( model, get_session_history,) runnable_with_history.invoke( [HumanMessage(content="hi - im bob!")], config={"configurable": {"session_id": "1"}},) AIMessage(content="It's nice to meet you, Bob! I'm Claude, an AI assistant created by Anthropic. 
How can I help you today?", response_metadata={'id': 'msg_01UHCCMiZz9yNYjt41xUJrtk', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 32}}, id='run-55f6a451-606b-4e04-9e39-e03b81035c1f-0', usage_metadata={'input_tokens': 12, 'output_tokens': 32, 'total_tokens': 44})

```python
runnable_with_history.invoke(
    [HumanMessage(content="whats my name?")],
    config={"configurable": {"session_id": "1"}},
)
```

```
AIMessage(content='I\'m afraid I don\'t actually know your name - you introduced yourself as Bob, but I don\'t have any other information about your identity. As an AI assistant, I don\'t have a way to independently verify people\'s names or identities. I\'m happy to continue our conversation, but I\'ll just refer to you as "Bob" since that\'s the name you provided.', response_metadata={'id': 'msg_018L96tAxiexMKsHBQz22CcE', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 52, 'output_tokens': 80}}, id='run-7399ddb5-bb06-444b-bfb2-2f65674105dd-0', usage_metadata={'input_tokens': 52, 'output_tokens': 80, 'total_tokens': 132})
```

info

Note that in this case the context is preserved via the chat history for the provided `session_id`, so the model knows the user's name. We can now try this with a new session id and see that it does not remember.

```python
runnable_with_history.invoke(
    [HumanMessage(content="whats my name?")],
    config={"configurable": {"session_id": "1a"}},
)
```

```
AIMessage(content="I'm afraid I don't actually know your name. As an AI assistant, I don't have personal information about you unless you provide it to me directly.", response_metadata={'id': 'msg_01LhbWu7mSKTvKAx7iQpMPzd', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 35}}, id='run-cf86cad2-21f2-4525-afc8-09bfd1e8af70-0', usage_metadata={'input_tokens': 12, 'output_tokens': 35, 'total_tokens': 47})
```

info

When we pass a different `session_id`, we start a new chat history, so the model does not know what the user's name is.

### Dictionary input, message(s) output[​](#dictionary-input-messages-output "Direct link to Dictionary input, message(s) output")

Besides just wrapping a raw model, the next step up is wrapping a prompt + LLM. This now changes the input to be a **dictionary** (because the input to a prompt is a dictionary). This adds two bits of complication. First: a dictionary can have multiple keys, but we only want to save ONE as input. In order to do this, we now need to specify a key to save as the input. Second: once we load the messages, we need to know how to save them to the dictionary. That equates to knowing which key in the dictionary to save them in. Therefore, we need to specify a key to save the loaded messages in. Putting it all together, that ends up looking something like: from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholderprompt = ChatPromptTemplate.from_messages( [ ( "system", "You're an assistant who speaks in {language}.
Respond in 20 words or fewer", ), MessagesPlaceholder(variable_name="history"), ("human", "{input}"), ])runnable = prompt | modelrunnable_with_history = RunnableWithMessageHistory( runnable, get_session_history, input_messages_key="input", history_messages_key="history",) **API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [MessagesPlaceholder](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.MessagesPlaceholder.html) info Note that we've specified `input_messages_key` (the key to be treated as the latest input message) and `history_messages_key` (the key to add historical messages to). runnable_with_history.invoke( {"language": "italian", "input": "hi im bob!"}, config={"configurable": {"session_id": "2"}},) AIMessage(content='Ciao Bob! È un piacere conoscerti. Come stai oggi?', response_metadata={'id': 'msg_0121ADUEe4G1hMC6zbqFWofr', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 29, 'output_tokens': 23}}, id='run-246a70df-aad6-43d6-a7e8-166d96e0d67e-0', usage_metadata={'input_tokens': 29, 'output_tokens': 23, 'total_tokens': 52}) runnable_with_history.invoke( {"language": "italian", "input": "whats my name?"}, config={"configurable": {"session_id": "2"}},) AIMessage(content='Bob, il tuo nome è Bob.', response_metadata={'id': 'msg_01EDUZG6nRLGeti9KhFN5cek', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 60, 'output_tokens': 12}}, id='run-294b4a72-81bc-4c43-b199-3aafdff87cb3-0', usage_metadata={'input_tokens': 60, 'output_tokens': 12, 'total_tokens': 72}) info Note that in this case the context is preserved via the chat history for the provided `session_id`, so the model knows the users name. We can now try this with a new session id and see that it does not remember. runnable_with_history.invoke( {"language": "italian", "input": "whats my name?"}, config={"configurable": {"session_id": "2a"}},) AIMessage(content='Mi dispiace, non so il tuo nome. Come posso aiutarti?', response_metadata={'id': 'msg_01Lyd9FAGQJTxxAZoFi3sQpQ', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 30, 'output_tokens': 23}}, id='run-19a82197-3b1c-4b5f-a68d-f91f4a2ba523-0', usage_metadata={'input_tokens': 30, 'output_tokens': 23, 'total_tokens': 53}) info When we pass a different `session_id`, we start a new chat history, so the model does not know what the user's name is. ### Messages input, dict output[​](#messages-input-dict-output "Direct link to Messages input, dict output") This format is useful when you are using a model to generate one key in a dictionary. from langchain_core.messages import HumanMessagefrom langchain_core.runnables import RunnableParallelchain = RunnableParallel({"output_message": model})runnable_with_history = RunnableWithMessageHistory( chain, get_session_history, output_messages_key="output_message",) **API Reference:**[HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [RunnableParallel](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html) info Note that we've specified `output_messages_key` (the key to be treated as the output to save). 
runnable_with_history.invoke( [HumanMessage(content="hi - im bob!")], config={"configurable": {"session_id": "3"}},) {'output_message': AIMessage(content="It's nice to meet you, Bob! I'm Claude, an AI assistant created by Anthropic. How can I help you today?", response_metadata={'id': 'msg_01WWJSyUyGGKuBqTs3h18ZMM', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 32}}, id='run-0f50cb43-a734-447c-b535-07c615a0984c-0', usage_metadata={'input_tokens': 12, 'output_tokens': 32, 'total_tokens': 44})} runnable_with_history.invoke( [HumanMessage(content="whats my name?")], config={"configurable": {"session_id": "3"}},) {'output_message': AIMessage(content='I\'m afraid I don\'t actually know your name - you introduced yourself as Bob, but I don\'t have any other information about your identity. As an AI assistant, I don\'t have a way to independently verify people\'s names or identities. I\'m happy to continue our conversation, but I\'ll just refer to you as "Bob" since that\'s the name you provided.', response_metadata={'id': 'msg_01TEGrhfLXTwo36rC7svdTy4', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 52, 'output_tokens': 80}}, id='run-178e8f3f-da21-430d-9edc-ef07797a5e2d-0', usage_metadata={'input_tokens': 52, 'output_tokens': 80, 'total_tokens': 132})} info Note that in this case the context is preserved via the chat history for the provided `session_id`, so the model knows the users name. We can now try this with a new session id and see that it does not remember. runnable_with_history.invoke( [HumanMessage(content="whats my name?")], config={"configurable": {"session_id": "3a"}},) {'output_message': AIMessage(content="I'm afraid I don't actually know your name. As an AI assistant, I don't have personal information about you unless you provide it to me directly.", response_metadata={'id': 'msg_0118ZBudDXAC9P6smf91NhCX', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 35}}, id='run-deb14a3a-0336-42b4-8ace-ad1e52ca5910-0', usage_metadata={'input_tokens': 12, 'output_tokens': 35, 'total_tokens': 47})} info When we pass a different `session_id`, we start a new chat history, so the model does not know what the user's name is. ### Dict with single key for all messages input, messages output[​](#dict-with-single-key-for-all-messages-input-messages-output "Direct link to Dict with single key for all messages input, messages output") This is a specific case of "Dictionary input, message(s) output". In this situation, because there is only a single key we don't need to specify as much - we only need to specify the `input_messages_key`. from operator import itemgetterrunnable_with_history = RunnableWithMessageHistory( itemgetter("input_messages") | model, get_session_history, input_messages_key="input_messages",) info Note that we've specified `input_messages_key` (the key to be treated as the latest input message). runnable_with_history.invoke( {"input_messages": [HumanMessage(content="hi - im bob!")]}, config={"configurable": {"session_id": "4"}},) AIMessage(content="It's nice to meet you, Bob! I'm Claude, an AI assistant created by Anthropic. 
How can I help you today?", response_metadata={'id': 'msg_01UdD5wz1J5xwoz5D94onaQC', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 32}}, id='run-91bee6eb-0814-4557-ad71-fef9b0270358-0', usage_metadata={'input_tokens': 12, 'output_tokens': 32, 'total_tokens': 44}) runnable_with_history.invoke( {"input_messages": [HumanMessage(content="whats my name?")]}, config={"configurable": {"session_id": "4"}},) AIMessage(content='I\'m afraid I don\'t actually know your name - you introduced yourself as Bob, but I don\'t have any other information about your identity. As an AI assistant, I don\'t have a way to independently verify people\'s names or identities. I\'m happy to continue our conversation, but I\'ll just refer to you as "Bob" since that\'s the name you provided.', response_metadata={'id': 'msg_012WUygxBKXcVJPeTW14LNrc', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 52, 'output_tokens': 80}}, id='run-fcbaaa1a-8c33-4eec-b0b0-5b800a47bddd-0', usage_metadata={'input_tokens': 52, 'output_tokens': 80, 'total_tokens': 132}) info Note that in this case the context is preserved via the chat history for the provided `session_id`, so the model knows the users name. We can now try this with a new session id and see that it does not remember. runnable_with_history.invoke( {"input_messages": [HumanMessage(content="whats my name?")]}, config={"configurable": {"session_id": "4a"}},) AIMessage(content="I'm afraid I don't actually know your name. As an AI assistant, I don't have personal information about you unless you provide it to me directly.", response_metadata={'id': 'msg_017xW3Ki5y4UBYzCU9Mf1pgM', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 12, 'output_tokens': 35}}, id='run-d2f372f7-3679-4a5c-9331-a55b820ec03e-0', usage_metadata={'input_tokens': 12, 'output_tokens': 35, 'total_tokens': 47}) info When we pass a different `session_id`, we start a new chat history, so the model does not know what the user's name is. Customization[​](#customization "Direct link to Customization") --------------------------------------------------------------- The configuration parameters by which we track message histories can be customized by passing in a list of `ConfigurableFieldSpec` objects to the `history_factory_config` parameter. Below, we use two parameters: a `user_id` and `conversation_id`. from langchain_core.runnables import ConfigurableFieldSpecdef get_session_history(user_id: str, conversation_id: str): return SQLChatMessageHistory(f"{user_id}--{conversation_id}", "sqlite:///memory.db")with_message_history = RunnableWithMessageHistory( runnable, get_session_history, input_messages_key="input", history_messages_key="history", history_factory_config=[ ConfigurableFieldSpec( id="user_id", annotation=str, name="User ID", description="Unique identifier for the user.", default="", is_shared=True, ), ConfigurableFieldSpec( id="conversation_id", annotation=str, name="Conversation ID", description="Unique identifier for the conversation.", default="", is_shared=True, ), ],)with_message_history.invoke( {"language": "italian", "input": "hi im bob!"}, config={"configurable": {"user_id": "123", "conversation_id": "1"}},) **API Reference:**[ConfigurableFieldSpec](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.utils.ConfigurableFieldSpec.html) AIMessage(content='Ciao Bob! 
È un piacere conoscerti. Come stai oggi?', response_metadata={'id': 'msg_016RJebCoiAgWaNcbv9wrMNW', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 29, 'output_tokens': 23}}, id='run-40425414-8f72-47d4-bf1d-a84175d8b3f8-0', usage_metadata={'input_tokens': 29, 'output_tokens': 23, 'total_tokens': 52}) # rememberswith_message_history.invoke( {"language": "italian", "input": "whats my name?"}, config={"configurable": {"user_id": "123", "conversation_id": "1"}},) AIMessage(content='Bob, il tuo nome è Bob.', response_metadata={'id': 'msg_01Kktiy3auFDKESY54KtTWPX', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 60, 'output_tokens': 12}}, id='run-c7768420-3f30-43f5-8834-74b1979630dd-0', usage_metadata={'input_tokens': 60, 'output_tokens': 12, 'total_tokens': 72}) # New user_id --> does not rememberwith_message_history.invoke( {"language": "italian", "input": "whats my name?"}, config={"configurable": {"user_id": "456", "conversation_id": "1"}},) AIMessage(content='Mi dispiace, non so il tuo nome. Come posso aiutarti?', response_metadata={'id': 'msg_0178FpbpPNioB7kqvyHk7rjD', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 30, 'output_tokens': 23}}, id='run-df1f1768-aab6-4aec-8bba-e33fc9e90b8d-0', usage_metadata={'input_tokens': 30, 'output_tokens': 23, 'total_tokens': 53}) Note that in this case the context was preserved for the same `user_id`, but once we changed it, the new chat history was started, even though the `conversation_id` was the same. [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/message_history.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous How to merge consecutive messages of the same type ](/v0.2/docs/how_to/merge_message_runs/)[ Next How to migrate from legacy LangChain agents to LangGraph ](/v0.2/docs/how_to/migrate_agent/) * [How to store and load messages](#how-to-store-and-load-messages) * [What is the runnable you are trying to wrap?](#what-is-the-runnable-you-are-trying-to-wrap) * [Setup](#setup) * [Messages input, message(s) output](#messages-input-messages-output) * [Dictionary input, message(s) output](#dictionary-input-messages-output) * [Messages input, dict output](#messages-input-dict-output) * [Dict with single key for all messages input, messages output](#dict-with-single-key-for-all-messages-input-messages-output) * [Customization](#customization)
https://python.langchain.com/v0.2/docs/tutorials/graph/
Build a Question Answering application over a Graph Database
=============================================================

In this guide we'll go over the basic ways to create a Q&A chain over a graph database. These systems will allow us to ask a question about the data in a graph database and get back a natural language answer.

⚠️ Security note ⚠️[​](#️-security-note-️ "Direct link to ⚠️ Security note ⚠️")
-------------------------------------------------------------------------------

Building Q&A systems over graph databases requires executing model-generated graph queries. There are inherent risks in doing this. Make sure that your database connection permissions are always scoped as narrowly as possible for your chain/agent's needs. This will mitigate, though not eliminate, the risks of building a model-driven system. For more on general security best practices, [see here](/v0.2/docs/security/).

Architecture[​](#architecture "Direct link to Architecture")
------------------------------------------------------------

At a high level, the steps of most graph chains are:

1. **Convert question to a graph database query**: Model converts user input to a graph database query (e.g. Cypher).
2. **Execute graph database query**: Execute the graph database query.
3. **Answer the question**: Model responds to user input using the query results.

![sql_usecase.png](/v0.2/assets/images/graph_usecase-34d891523e6284bb6230b38c5f8392e5.png)

Setup[​](#setup "Direct link to Setup")
---------------------------------------

First, get required packages and set environment variables. In this example, we will be using a Neo4j graph database.

```python
%pip install --upgrade --quiet langchain langchain-community langchain-openai neo4j
```

We default to OpenAI models in this guide.

```python
import getpass
import os

os.environ["OPENAI_API_KEY"] = getpass.getpass()

# Uncomment the below to use LangSmith. Not required.
# os.environ["LANGCHAIN_API_KEY"] = getpass.getpass()
# os.environ["LANGCHAIN_TRACING_V2"] = "true"
```

```
········
```

Next, we need to define Neo4j credentials. Follow [these installation steps](https://neo4j.com/docs/operations-manual/current/installation/) to set up a Neo4j database.

```python
os.environ["NEO4J_URI"] = "bolt://localhost:7687"
os.environ["NEO4J_USERNAME"] = "neo4j"
os.environ["NEO4J_PASSWORD"] = "password"
```

The below example will create a connection with a Neo4j database and will populate it with example data about movies and their actors.
from langchain_community.graphs import Neo4jGraphgraph = Neo4jGraph()# Import movie informationmovies_query = """LOAD CSV WITH HEADERS FROM 'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/movies_small.csv'AS rowMERGE (m:Movie {id:row.movieId})SET m.released = date(row.released), m.title = row.title, m.imdbRating = toFloat(row.imdbRating)FOREACH (director in split(row.director, '|') | MERGE (p:Person {name:trim(director)}) MERGE (p)-[:DIRECTED]->(m))FOREACH (actor in split(row.actors, '|') | MERGE (p:Person {name:trim(actor)}) MERGE (p)-[:ACTED_IN]->(m))FOREACH (genre in split(row.genres, '|') | MERGE (g:Genre {name:trim(genre)}) MERGE (m)-[:IN_GENRE]->(g))"""graph.query(movies_query) **API Reference:**[Neo4jGraph](https://api.python.langchain.com/en/latest/graphs/langchain_community.graphs.neo4j_graph.Neo4jGraph.html) [] Graph schema[​](#graph-schema "Direct link to Graph schema") ------------------------------------------------------------ In order for an LLM to be able to generate a Cypher statement, it needs information about the graph schema. When you instantiate a graph object, it retrieves the information about the graph schema. If you later make any changes to the graph, you can run the `refresh_schema` method to refresh the schema information. graph.refresh_schema()print(graph.schema) Node properties are the following:Movie {imdbRating: FLOAT, id: STRING, released: DATE, title: STRING},Person {name: STRING},Genre {name: STRING},Chunk {id: STRING, question: STRING, query: STRING, text: STRING, embedding: LIST}Relationship properties are the following:The relationships are the following:(:Movie)-[:IN_GENRE]->(:Genre),(:Person)-[:DIRECTED]->(:Movie),(:Person)-[:ACTED_IN]->(:Movie) Great! We've got a graph database that we can query. Now let's try hooking it up to an LLM. Chain[​](#chain "Direct link to Chain") --------------------------------------- Let's use a simple chain that takes a question, turns it into a Cypher query, executes the query, and uses the result to answer the original question. ![graph_chain.webp](/v0.2/assets/images/graph_chain-6379941793e0fa985e51e4bda0329403.webp) LangChain comes with a built-in chain for this workflow that is designed to work with Neo4j: [GraphCypherQAChain](/v0.2/docs/integrations/graphs/neo4j_cypher/) from langchain.chains import GraphCypherQAChainfrom langchain_openai import ChatOpenAIllm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)chain = GraphCypherQAChain.from_llm(graph=graph, llm=llm, verbose=True)response = chain.invoke({"query": "What was the cast of the Casino?"})response **API Reference:**[GraphCypherQAChain](https://api.python.langchain.com/en/latest/chains/langchain_community.chains.graph_qa.cypher.GraphCypherQAChain.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) > Entering new GraphCypherQAChain chain...Generated Cypher:MATCH (:Movie {title: "Casino"})<-[:ACTED_IN]-(actor:Person)RETURN actor.nameFull Context:[{'actor.name': 'Joe Pesci'}, {'actor.name': 'Robert De Niro'}, {'actor.name': 'Sharon Stone'}, {'actor.name': 'James Woods'}]> Finished chain. {'query': 'What was the cast of the Casino?', 'result': 'The cast of Casino included Joe Pesci, Robert De Niro, Sharon Stone, and James Woods.'} Validating relationship direction ================================= LLMs can struggle with relationship directions in generated Cypher statement. 
Since the graph schema is predefined, we can validate and optionally correct relationship directions in the generated Cypher statements by using the `validate_cypher` parameter. chain = GraphCypherQAChain.from_llm( graph=graph, llm=llm, verbose=True, validate_cypher=True)response = chain.invoke({"query": "What was the cast of the Casino?"})response > Entering new GraphCypherQAChain chain...Generated Cypher:MATCH (:Movie {title: "Casino"})<-[:ACTED_IN]-(actor:Person)RETURN actor.nameFull Context:[{'actor.name': 'Joe Pesci'}, {'actor.name': 'Robert De Niro'}, {'actor.name': 'Sharon Stone'}, {'actor.name': 'James Woods'}]> Finished chain. {'query': 'What was the cast of the Casino?', 'result': 'The cast of Casino included Joe Pesci, Robert De Niro, Sharon Stone, and James Woods.'} ### Next steps[​](#next-steps "Direct link to Next steps") For more complex query-generation, we may want to create few-shot prompts or add query-checking steps. For advanced techniques like this and more check out: * [Prompting strategies](/v0.2/docs/how_to/graph_prompting/): Advanced prompt engineering techniques. * [Mapping values](/v0.2/docs/how_to/graph_mapping/): Techniques for mapping values from questions to database. * [Semantic layer](/v0.2/docs/how_to/graph_semantic/): Techniques for implementing semantic layers. * [Constructing graphs](/v0.2/docs/how_to/graph_constructing/): Techniques for constructing knowledge graphs. [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/tutorials/graph.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous Tutorials ](/v0.2/docs/tutorials/)[ Next Tutorials ](/v0.2/docs/tutorials/) * [⚠️ Security note ⚠️](#️-security-note-️) * [Architecture](#architecture) * [Setup](#setup) * [Graph schema](#graph-schema) * [Chain](#chain) * [Next steps](#next-steps)
https://python.langchain.com/v0.2/docs/how_to/multi_vector/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to retrieve using multiple vectors per document On this page How to retrieve using multiple vectors per document =================================================== It can often be useful to store multiple vectors per document. There are multiple use cases where this is beneficial. For example, we can embed multiple chunks of a document and associate those embeddings with the parent document, allowing retriever hits on the chunks to return the larger document. LangChain implements a base [MultiVectorRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.multi_vector.MultiVectorRetriever.html), which simplifies this process. Much of the complexity lies in how to create the multiple vectors per document. This notebook covers some of the common ways to create those vectors and use the `MultiVectorRetriever`. The methods to create multiple vectors per document include: * Smaller chunks: split a document into smaller chunks, and embed those (this is [ParentDocumentRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.parent_document_retriever.ParentDocumentRetriever.html)). * Summary: create a summary for each document, embed that along with (or instead of) the document. * Hypothetical questions: create hypothetical questions that each document would be appropriate to answer, embed those along with (or instead of) the document. Note that this also enables another method of adding embeddings - manually. This is useful because you can explicitly add questions or queries that should lead to a document being recovered, giving you more control. Below we walk through an example. First we instantiate some documents. We will index them in an (in-memory) [Chroma](/v0.2/docs/integrations/providers/chroma/) vector store using [OpenAI](https://python.langchain.com/v0.2/docs/integrations/text_embedding/openai/) embeddings, but any LangChain vector store or embeddings model will suffice. %pip install --upgrade --quiet langchain-chroma langchain langchain-openai > /dev/null from langchain.storage import InMemoryByteStorefrom langchain_chroma import Chromafrom langchain_community.document_loaders import TextLoaderfrom langchain_openai import OpenAIEmbeddingsfrom langchain_text_splitters import RecursiveCharacterTextSplitterloaders = [ TextLoader("paul_graham_essay.txt"), TextLoader("state_of_the_union.txt"),]docs = []for loader in loaders: docs.extend(loader.load())text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000)docs = text_splitter.split_documents(docs)# The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name="full_documents", embedding_function=OpenAIEmbeddings()) **API Reference:**[InMemoryByteStore](https://api.python.langchain.com/en/latest/stores/langchain_core.stores.InMemoryByteStore.html) | [TextLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.text.TextLoader.html) | [OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) | [RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html) Smaller chunks[​](#smaller-chunks "Direct link to Smaller chunks") ------------------------------------------------------------------ Often times it can be useful to retrieve larger chunks of information, but embed smaller chunks. 
This allows for embeddings to capture the semantic meaning as closely as possible, but for as much context as possible to be passed downstream. Note that this is what the [ParentDocumentRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.parent_document_retriever.ParentDocumentRetriever.html) does. Here we show what is going on under the hood. We will make a distinction between the vector store, which indexes embeddings of the (sub) documents, and the document store, which houses the "parent" documents and associates them with an identifier. import uuidfrom langchain.retrievers.multi_vector import MultiVectorRetriever# The storage layer for the parent documentsstore = InMemoryByteStore()id_key = "doc_id"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, byte_store=store, id_key=id_key,)doc_ids = [str(uuid.uuid4()) for _ in docs] **API Reference:**[MultiVectorRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.multi_vector.MultiVectorRetriever.html) We next generate the "sub" documents by splitting the original documents. Note that we store the document identifier in the `metadata` of the corresponding [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) object. # The splitter to use to create smaller chunkschild_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)sub_docs = []for i, doc in enumerate(docs): _id = doc_ids[i] _sub_docs = child_text_splitter.split_documents([doc]) for _doc in _sub_docs: _doc.metadata[id_key] = _id sub_docs.extend(_sub_docs) Finally, we index the documents in our vector store and document store: retriever.vectorstore.add_documents(sub_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) The vector store alone will retrieve small chunks: retriever.vectorstore.similarity_search("justice breyer")[0] Document(page_content='Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.', metadata={'doc_id': '064eca46-a4c4-4789-8e3b-583f9597e54f', 'source': 'state_of_the_union.txt'}) Whereas the retriever will return the larger parent document: len(retriever.invoke("justice breyer")[0].page_content) 9875 The default search type the retriever performs on the vector database is a similarity search. LangChain vector stores also support searching via [Max Marginal Relevance](https://api.python.langchain.com/en/latest/vectorstores/langchain_core.vectorstores.VectorStore.html#langchain_core.vectorstores.VectorStore.max_marginal_relevance_search). 
This can be controlled via the `search_type` parameter of the retriever: from langchain.retrievers.multi_vector import SearchTyperetriever.search_type = SearchType.mmrlen(retriever.invoke("justice breyer")[0].page_content) **API Reference:**[SearchType](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.multi_vector.SearchType.html) 9875 Associating summaries with a document for retrieval[​](#associating-summaries-with-a-document-for-retrieval "Direct link to Associating summaries with a document for retrieval") --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- A summary may be able to distill more accurately what a chunk is about, leading to better retrieval. Here we show how to create summaries, and then embed those. We construct a simple [chain](/v0.2/docs/how_to/sequence/) that will receive an input [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) object and generate a summary using a LLM. * OpenAI * Anthropic * Azure * Google * Cohere * FireworksAI * Groq * MistralAI * TogetherAI pip install -qU langchain-openai import getpassimport osos.environ["OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI(model="gpt-3.5-turbo-0125") pip install -qU langchain-anthropic import getpassimport osos.environ["ANTHROPIC_API_KEY"] = getpass.getpass()from langchain_anthropic import ChatAnthropicllm = ChatAnthropic(model="claude-3-sonnet-20240229") pip install -qU langchain-openai import getpassimport osos.environ["AZURE_OPENAI_API_KEY"] = getpass.getpass()from langchain_openai import AzureChatOpenAIllm = AzureChatOpenAI( azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"], openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],) pip install -qU langchain-google-vertexai import getpassimport osos.environ["GOOGLE_API_KEY"] = getpass.getpass()from langchain_google_vertexai import ChatVertexAIllm = ChatVertexAI(model="gemini-pro") pip install -qU langchain-cohere import getpassimport osos.environ["COHERE_API_KEY"] = getpass.getpass()from langchain_cohere import ChatCoherellm = ChatCohere(model="command-r") pip install -qU langchain-fireworks import getpassimport osos.environ["FIREWORKS_API_KEY"] = getpass.getpass()from langchain_fireworks import ChatFireworksllm = ChatFireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct") pip install -qU langchain-groq import getpassimport osos.environ["GROQ_API_KEY"] = getpass.getpass()from langchain_groq import ChatGroqllm = ChatGroq(model="llama3-8b-8192") pip install -qU langchain-mistralai import getpassimport osos.environ["MISTRAL_API_KEY"] = getpass.getpass()from langchain_mistralai import ChatMistralAIllm = ChatMistralAI(model="mistral-large-latest") pip install -qU langchain-openai import getpassimport osos.environ["TOGETHER_API_KEY"] = getpass.getpass()from langchain_openai import ChatOpenAIllm = ChatOpenAI( base_url="https://api.together.xyz/v1", api_key=os.environ["TOGETHER_API_KEY"], model="mistralai/Mixtral-8x7B-Instruct-v0.1",) import uuidfrom langchain_core.documents import Documentfrom langchain_core.output_parsers import StrOutputParserfrom langchain_core.prompts import ChatPromptTemplatechain = ( {"doc": lambda x: x.page_content} | ChatPromptTemplate.from_template("Summarize the following document:\n\n{doc}") | llm | 
StrOutputParser()) **API Reference:**[Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) | [StrOutputParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.string.StrOutputParser.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) Note that we can [batch](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) the chain accross documents: summaries = chain.batch(docs, {"max_concurrency": 5}) We can then initialize a `MultiVectorRetriever` as before, indexing the summaries in our vector store, and retaining the original documents in our document store: # The vectorstore to use to index the child chunksvectorstore = Chroma(collection_name="summaries", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryByteStore()id_key = "doc_id"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, byte_store=store, id_key=id_key,)doc_ids = [str(uuid.uuid4()) for _ in docs]summary_docs = [ Document(page_content=s, metadata={id_key: doc_ids[i]}) for i, s in enumerate(summaries)]retriever.vectorstore.add_documents(summary_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) # # We can also add the original chunks to the vectorstore if we so want# for i, doc in enumerate(docs):# doc.metadata[id_key] = doc_ids[i]# retriever.vectorstore.add_documents(docs) Querying the vector store will return summaries: sub_docs = retriever.vectorstore.similarity_search("justice breyer")sub_docs[0] Document(page_content="President Biden recently nominated Judge Ketanji Brown Jackson to serve on the United States Supreme Court, emphasizing her qualifications and broad support. The President also outlined a plan to secure the border, fix the immigration system, protect women's rights, support LGBTQ+ Americans, and advance mental health services. He highlighted the importance of bipartisan unity in passing legislation, such as the Violence Against Women Act. The President also addressed supporting veterans, particularly those impacted by exposure to burn pits, and announced plans to expand benefits for veterans with respiratory cancers. Additionally, he proposed a plan to end cancer as we know it through the Cancer Moonshot initiative. President Biden expressed optimism about the future of America and emphasized the strength of the American people in overcoming challenges.", metadata={'doc_id': '84015b1b-980e-400a-94d8-cf95d7e079bd'}) Whereas the retriever will return the larger source document: retrieved_docs = retriever.invoke("justice breyer")len(retrieved_docs[0].page_content) 9194 Hypothetical Queries[​](#hypothetical-queries "Direct link to Hypothetical Queries") ------------------------------------------------------------------------------------ An LLM can also be used to generate a list of hypothetical questions that could be asked of a particular document, which might bear close semantic similarity to relevant queries in a [RAG](/v0.2/docs/tutorials/rag/) application. These questions can then be embedded and associated with the documents to improve retrieval. Below, we use the [with\_structured\_output](/v0.2/docs/how_to/structured_output/) method to structure the LLM output into a list of strings. 
```python
from typing import List

from langchain_core.pydantic_v1 import BaseModel, Field


class HypotheticalQuestions(BaseModel):
    """Generate hypothetical questions."""

    questions: List[str] = Field(..., description="List of questions")


chain = (
    {"doc": lambda x: x.page_content}
    # Only asking for 3 hypothetical questions, but this could be adjusted
    | ChatPromptTemplate.from_template(
        "Generate a list of exactly 3 hypothetical questions that the below document could be used to answer:\n\n{doc}"
    )
    | ChatOpenAI(max_retries=0, model="gpt-4o").with_structured_output(
        HypotheticalQuestions
    )
    | (lambda x: x.questions)
)
```

Invoking the chain on a single document demonstrates that it outputs a list of questions:

```python
chain.invoke(docs[0])
```

```
["What impact did the IBM 1401 have on the author's early programming experiences?",
 "How did the transition from using the IBM 1401 to microcomputers influence the author's programming journey?",
 "What role did Lisp play in shaping the author's understanding and approach to AI?"]
```

We can then batch the chain over all documents and assemble our vector store and document store as before:

```python
# Batch chain over documents to generate hypothetical questions
hypothetical_questions = chain.batch(docs, {"max_concurrency": 5})

# The vectorstore to use to index the child chunks
vectorstore = Chroma(
    collection_name="hypo-questions", embedding_function=OpenAIEmbeddings()
)
# The storage layer for the parent documents
store = InMemoryByteStore()
id_key = "doc_id"
# The retriever (empty to start)
retriever = MultiVectorRetriever(
    vectorstore=vectorstore,
    byte_store=store,
    id_key=id_key,
)
doc_ids = [str(uuid.uuid4()) for _ in docs]

# Generate Document objects from hypothetical questions
question_docs = []
for i, question_list in enumerate(hypothetical_questions):
    question_docs.extend(
        [Document(page_content=s, metadata={id_key: doc_ids[i]}) for s in question_list]
    )

retriever.vectorstore.add_documents(question_docs)
retriever.docstore.mset(list(zip(doc_ids, docs)))
```

Note that querying the underlying vector store will retrieve hypothetical questions that are semantically similar to the input query:

```python
sub_docs = retriever.vectorstore.similarity_search("justice breyer")
sub_docs
```

```
[Document(page_content='What might be the potential benefits of nominating Circuit Court of Appeals Judge Ketanji Brown Jackson to the United States Supreme Court?', metadata={'doc_id': '43292b74-d1b8-4200-8a8b-ea0cb57fbcdb'}),
 Document(page_content='How might the Bipartisan Infrastructure Law impact the economic competition between the U.S. and China?', metadata={'doc_id': '66174780-d00c-4166-9791-f0069846e734'}),
 Document(page_content='What factors led to the creation of Y Combinator?', metadata={'doc_id': '72003c4e-4cc9-4f09-a787-0b541a65b38c'}),
 Document(page_content='How did the ability to publish essays online change the landscape for writers and thinkers?', metadata={'doc_id': 'e8d2c648-f245-4bcc-b8d3-14e64a164b64'})]
```

And invoking the retriever will return the corresponding document:

```python
retrieved_docs = retriever.invoke("justice breyer")
len(retrieved_docs[0].page_content)
```

```
9194
```
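As a closing illustration (not part of the original notebook), any of the retrievers built above can be dropped into a small RAG chain like other LangChain retrievers. The prompt wording and the `format_docs` helper below are hypothetical, and `llm` is assumed to be the chat model configured earlier on this page:

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough


def format_docs(docs):
    # Concatenate the retrieved parent documents into a single context string.
    return "\n\n".join(doc.page_content for doc in docs)


rag_prompt = ChatPromptTemplate.from_template(
    "Answer the question using only the context below.\n\n"
    "Context:\n{context}\n\nQuestion: {question}"
)

rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | rag_prompt
    | llm
    | StrOutputParser()
)

# rag_chain.invoke("What did the president say about Justice Breyer?")
```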
[ Previous How to migrate from legacy LangChain agents to LangGraph ](/v0.2/docs/how_to/migrate_agent/)[ Next How to pass multimodal data directly to models ](/v0.2/docs/how_to/multimodal_inputs/) * [Smaller chunks](#smaller-chunks) * [Associating summaries with a document for retrieval](#associating-summaries-with-a-document-for-retrieval) * [Hypothetical Queries](#hypothetical-queries)
https://python.langchain.com/v0.2/docs/tutorials/summarization/
* [](/v0.2/) * [Tutorials](/v0.2/docs/tutorials/) * Summarize Text On this page Summarize Text ============== Suppose you have a set of documents (PDFs, Notion pages, customer questions, etc.) and you want to summarize the content. LLMs are a great tool for this given their proficiency in understanding and synthesizing text. In the context of [retrieval-augmented generation](/v0.2/docs/tutorials/rag/), summarizing text can help distill the information in a large number of retrieved documents to provide context for a LLM. In this walkthrough we'll go over how to summarize content from multiple documents using LLMs. ![Image description](/v0.2/assets/images/summarization_use_case_1-874f7b2c94f64216f1f967fb5aca7bc1.png) Concepts[​](#concepts "Direct link to Concepts") ------------------------------------------------ Concepts we will cover are: * Using [language models](/v0.2/docs/concepts/#chat-models). * Using [document loaders](/v0.2/docs/concepts/#document-loaders), specifically the [WebBaseLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.web_base.WebBaseLoader.html) to load content from an HTML webpage. * Three ways to summarize or otherwise combine documents. 1. [Stuff](/v0.2/docs/tutorials/summarization/#stuff), which simply concatenates documents into a prompt; 2. [Map-reduce](/v0.2/docs/tutorials/summarization/#map-reduce), which splits documents into batches, summarizes those, and then summarizes the summaries; 3. [Refine](/v0.2/docs/tutorials/summarization/#refine), which updates a rolling summary be iterating over the documents in a sequence. That's a fair amount to cover! Let's dive in. Setup[​](#setup "Direct link to Setup") --------------------------------------- ### Jupyter Notebook[​](#jupyter-notebook "Direct link to Jupyter Notebook") This guide (and most of the other guides in the documentation) uses [Jupyter notebooks](https://jupyter.org/) and assumes the reader is as well. Jupyter notebooks are perfect for learning how to work with LLM systems because oftentimes things can go wrong (unexpected output, API down, etc) and going through guides in an interactive environment is a great way to better understand them. This and other tutorials are perhaps most conveniently run in a Jupyter notebook. See [here](https://jupyter.org/install) for instructions on how to install. ### Installation[​](#installation "Direct link to Installation") To install LangChain run: * Pip * Conda pip install langchain conda install langchain -c conda-forge For more details, see our [Installation guide](/v0.2/docs/how_to/installation/). ### LangSmith[​](#langsmith "Direct link to LangSmith") Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with [LangSmith](https://smith.langchain.com). After you sign up at the link above, make sure to set your environment variables to start logging traces: export LANGCHAIN_TRACING_V2="true"export LANGCHAIN_API_KEY="..." 
Or, if in a notebook, you can set them with: import getpassimport osos.environ["LANGCHAIN_TRACING_V2"] = "true"os.environ["LANGCHAIN_API_KEY"] = getpass.getpass() Overview[​](#overview "Direct link to Overview") ------------------------------------------------ A central question for building a summarizer is how to pass your documents into the LLM's context window. Three common approaches for this are: 1. `Stuff`: Simply "stuff" all your documents into a single prompt. This is the simplest approach (see [here](/v0.2/docs/tutorials/rag/#built-in-chains) for more on the `create_stuff_documents_chain` constructor, which is used for this method). 2. `Map-reduce`: Summarize each document on its own in a "map" step and then "reduce" the summaries into a final summary (see [here](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain.html) for more on the `MapReduceDocumentsChain`, which is used for this method). 3. `Refine`: Update a rolling summary be iterating over the documents in a sequence. ![Image description](/v0.2/assets/images/summarization_use_case_2-f2a4d5d60980a79140085fb7f8043217.png) Quickstart[​](#quickstart "Direct link to Quickstart") ------------------------------------------------------ To give you a sneak preview, either pipeline can be wrapped in a single object: `load_summarize_chain`. Suppose we want to summarize a blog post. We can create this in a few lines of code. First set environment variables and install packages: %pip install --upgrade --quiet langchain-openai tiktoken chromadb langchain# Set env var OPENAI_API_KEY or load from a .env file# import dotenv# dotenv.load_dotenv() We can use `chain_type="stuff"`, especially if using larger context window models such as: * 128k token OpenAI `gpt-4-turbo-2024-04-09` * 200k token Anthropic `claude-3-sonnet-20240229` We can also supply `chain_type="map_reduce"` or `chain_type="refine"`. First we load in our documents. We will use [WebBaseLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.web_base.WebBaseLoader.html) to load a blog post: import osos.environ["LANGCHAIN_TRACING_V2"] = "True" from langchain.chains.summarize import load_summarize_chainfrom langchain_community.document_loaders import WebBaseLoaderfrom langchain_openai import ChatOpenAIloader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")docs = loader.load()llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo-1106")chain = load_summarize_chain(llm, chain_type="stuff")result = chain.invoke(docs)print(result["output_text"]) **API Reference:**[load\_summarize\_chain](https://api.python.langchain.com/en/latest/chains/langchain.chains.summarize.chain.load_summarize_chain.html) | [WebBaseLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.web_base.WebBaseLoader.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) The article discusses the concept of LLM-powered autonomous agents, with a focus on the components of planning, memory, and tool use. It includes case studies and proof-of-concept examples, as well as challenges and references to related research. The author emphasizes the potential of LLMs in creating powerful problem-solving agents, while also highlighting limitations such as finite context length and reliability of natural language interfaces. Option 1. 
Stuff[​](#stuff "Direct link to Option 1. Stuff") ----------------------------------------------------------- When we use `load_summarize_chain` with `chain_type="stuff"`, we will use the [StuffDocumentsChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.stuff.StuffDocumentsChain.html#langchain.chains.combine_documents.stuff.StuffDocumentsChain). The chain will take a list of documents, insert them all into a prompt, and pass that prompt to an LLM: from langchain.chains.combine_documents.stuff import StuffDocumentsChainfrom langchain.chains.llm import LLMChainfrom langchain_core.prompts import PromptTemplate# Define promptprompt_template = """Write a concise summary of the following:"{text}"CONCISE SUMMARY:"""prompt = PromptTemplate.from_template(prompt_template)# Define LLM chainllm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo-16k")llm_chain = LLMChain(llm=llm, prompt=prompt)# Define StuffDocumentsChainstuff_chain = StuffDocumentsChain(llm_chain=llm_chain, document_variable_name="text")docs = loader.load()print(stuff_chain.invoke(docs)["output_text"]) **API Reference:**[StuffDocumentsChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.stuff.StuffDocumentsChain.html) | [LLMChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.llm.LLMChain.html) | [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) The article discusses the concept of building autonomous agents powered by large language models (LLMs). It explores the components of such agents, including planning, memory, and tool use. The article provides case studies and examples of proof-of-concept demos, highlighting the challenges and limitations of LLM-powered agents. It also includes references to related research papers and projects. Great! We can see that we reproduce the earlier result using the `load_summarize_chain`. ### Go deeper[​](#go-deeper "Direct link to Go deeper") * You can easily customize the prompt. * You can easily try different LLMs, (e.g., [Claude](/v0.2/docs/integrations/chat/anthropic/)) via the `llm` parameter. Option 2. Map-Reduce[​](#map-reduce "Direct link to Option 2. Map-Reduce") -------------------------------------------------------------------------- Let's unpack the map reduce approach. For this, we'll first map each document to an individual summary using an `LLMChain`. Then we'll use a `ReduceDocumentsChain` to combine those summaries into a single global summary. 
First, we specify the LLMChain to use for mapping each document to an individual summary: from langchain.chains import MapReduceDocumentsChain, ReduceDocumentsChainfrom langchain_text_splitters import CharacterTextSplitterllm = ChatOpenAI(temperature=0)# Mapmap_template = """The following is a set of documents{docs}Based on this list of docs, please identify the main themes Helpful Answer:"""map_prompt = PromptTemplate.from_template(map_template)map_chain = LLMChain(llm=llm, prompt=map_prompt) **API Reference:**[MapReduceDocumentsChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain.html) | [ReduceDocumentsChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.reduce.ReduceDocumentsChain.html) | [CharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.CharacterTextSplitter.html) We can also use the Prompt Hub to store and fetch prompts. This will work with your [LangSmith API key](https://docs.smith.langchain.com/). For example, see the map prompt [here](https://smith.langchain.com/hub/rlm/map-prompt). from langchain import hubmap_prompt = hub.pull("rlm/map-prompt")map_chain = LLMChain(llm=llm, prompt=map_prompt) The `ReduceDocumentsChain` handles taking the document mapping results and reducing them into a single output. It wraps a generic `CombineDocumentsChain` (like `StuffDocumentsChain`) but adds the ability to collapse documents before passing it to the `CombineDocumentsChain` if their cumulative size exceeds `token_max`. In this example, we can actually re-use our chain for combining our docs to also collapse our docs. So if the cumulative number of tokens in our mapped documents exceeds 4000 tokens, then we'll recursively pass in the documents in batches of < 4000 tokens to our `StuffDocumentsChain` to create batched summaries. And once those batched summaries are cumulatively less than 4000 tokens, we'll pass them all one last time to the `StuffDocumentsChain` to create the final summary. # Reducereduce_template = """The following is set of summaries:{docs}Take these and distill it into a final, consolidated summary of the main themes. Helpful Answer:"""reduce_prompt = PromptTemplate.from_template(reduce_template) # Note we can also get this from the prompt hub, as noted abovereduce_prompt = hub.pull("rlm/reduce-prompt") reduce_prompt ChatPromptTemplate(input_variables=['docs'], metadata={'lc_hub_owner': 'rlm', 'lc_hub_repo': 'map-prompt', 'lc_hub_commit_hash': 'de4fba345f211a462584fc25b7077e69c1ba6cdcf4e21b7ec9abe457ddb16c87'}, messages=[HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['docs'], template='The following is a set of documents:\n{docs}\nBased on this list of docs, please identify the main themes \nHelpful Answer:'))]) # Run chainreduce_chain = LLMChain(llm=llm, prompt=reduce_prompt)# Takes a list of documents, combines them into a single string, and passes this to an LLMChaincombine_documents_chain = StuffDocumentsChain( llm_chain=reduce_chain, document_variable_name="docs")# Combines and iteratively reduces the mapped documentsreduce_documents_chain = ReduceDocumentsChain( # This is final chain that is called. combine_documents_chain=combine_documents_chain, # If documents exceed context for `StuffDocumentsChain` collapse_documents_chain=combine_documents_chain, # The maximum number of tokens to group documents into. 
token_max=4000,) Combining our map and reduce chains into one: # Combining documents by mapping a chain over them, then combining resultsmap_reduce_chain = MapReduceDocumentsChain( # Map chain llm_chain=map_chain, # Reduce chain reduce_documents_chain=reduce_documents_chain, # The variable name in the llm_chain to put the documents in document_variable_name="docs", # Return the results of the map steps in the output return_intermediate_steps=False,)text_splitter = CharacterTextSplitter.from_tiktoken_encoder( chunk_size=1000, chunk_overlap=0)split_docs = text_splitter.split_documents(docs) Created a chunk of size 1003, which is longer than the specified 1000 result = map_reduce_chain.invoke(split_docs)print(result["output_text"]) The main themes identified in the list of documents provided are related to large language models (LLMs), autonomous agents, prompting, steering language models, natural language processing (NLP), the use of tools to augment language models, reinforcement learning, reasoning, acting, self-reflection, and the integration of language models with external knowledge sources. If we follow the [LangSmith trace](https://smith.langchain.com/public/3a1a6d51-68e5-4805-8d90-78920ce60a51/r), we can see the individual LLM summarizations, including the [final call](https://smith.langchain.com/public/69482813-f0b7-46b0-a99f-86d56fc9644a/r) that summarizes the summaries. ### Go deeper[​](#go-deeper-1 "Direct link to Go deeper") **Customization** * As shown above, you can customize the LLMs and prompts for the map and reduce stages. **Real-world use-case** * See [this blog post](https://blog.langchain.dev/llms-to-improve-documentation/) for a case study on analyzing user interactions (questions about LangChain documentation)! * The blog post and associated [repo](https://github.com/mendableai/QA_clustering) also introduce clustering as a means of summarization. * This opens up another path beyond the `stuff` or `map-reduce` approaches that is worth considering. ![Image description](/v0.2/assets/images/summarization_use_case_3-896f435bc48194ddaead73043027e16f.png) Option 3. Refine[​](#refine "Direct link to Option 3. Refine") -------------------------------------------------------------- [RefineDocumentsChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.refine.RefineDocumentsChain.html) is similar to map-reduce: > The refine documents chain constructs a response by looping over the input documents and iteratively updating its answer. For each document, it passes all non-document inputs, the current document, and the latest intermediate answer to an LLM chain to get a new answer. This can be run simply by specifying `chain_type="refine"`. chain = load_summarize_chain(llm, chain_type="refine")result = chain.invoke(split_docs)print(result["output_text"]) The existing summary provides detailed instructions for implementing a project's architecture through code, focusing on creating core classes, functions, and methods in different files following best practices for the chosen language and framework. Assumptions about the model, view, and controller components are also outlined. The additional context highlights challenges in long-term planning and task decomposition, as well as the reliability issues with natural language interfaces in LLM-powered autonomous agents.
These insights shed light on the limitations and potential pitfalls of using LLMs in agent systems, with references to recent research on LLM-powered autonomous agents and related technologies. Following the [Langsmith trace](https://smith.langchain.com/public/38017fa7-b190-4635-992c-e8554227a4bb/r), we can see the summaries iteratively updated with new information. It's also possible to supply a prompt and return intermediate steps. prompt_template = """Write a concise summary of the following:{text}CONCISE SUMMARY:"""prompt = PromptTemplate.from_template(prompt_template)refine_template = ( "Your job is to produce a final summary\n" "We have provided an existing summary up to a certain point: {existing_answer}\n" "We have the opportunity to refine the existing summary" "(only if needed) with some more context below.\n" "------------\n" "{text}\n" "------------\n" "Given the new context, refine the original summary in Italian" "If the context isn't useful, return the original summary.")refine_prompt = PromptTemplate.from_template(refine_template)chain = load_summarize_chain( llm=llm, chain_type="refine", question_prompt=prompt, refine_prompt=refine_prompt, return_intermediate_steps=True, input_key="input_documents", output_key="output_text",)result = chain.invoke({"input_documents": split_docs}, return_only_outputs=True) print(result["output_text"]) Il presente articolo discute il concetto di costruire agenti autonomi utilizzando LLM (large language model) come controller principale. Esplora i diversi componenti di un sistema di agenti alimentato da LLM, tra cui la pianificazione, la memoria e l'uso degli strumenti. Dimostrazioni di concetto come AutoGPT mostrano il potenziale di LLM come risolutore generale di problemi. Approcci come Chain of Thought, Tree of Thoughts, LLM+P, ReAct e Reflexion consentono agli agenti autonomi di pianificare, riflettere su se stessi e migliorarsi iterativamente. Tuttavia, ci sono sfide da affrontare, come la limitata capacità di contesto che limita l'inclusione di informazioni storiche dettagliate e la difficoltà di pianificazione a lungo termine e decomposizione delle attività. Inoltre, l'affidabilità dell'interfaccia di linguaggio naturale tra LLM e componenti esterni come la memoria e gli strumenti è incerta, poiché i LLM possono commettere errori di formattazione e mostrare comportamenti ribelli. Nonostante ciò, il sistema AutoGPT viene menzionato come esempio di dimostrazione di concetto che utilizza LLM come controller principale per agenti autonomi. Questo articolo fa riferimento a diverse fonti che esplorano approcci e applicazioni specifiche di LLM nell'ambito degli agenti autonomi. print("\n\n".join(result["intermediate_steps"][:3])) This article discusses the concept of building autonomous agents using LLM (large language model) as the core controller. The article explores the different components of an LLM-powered agent system, including planning, memory, and tool use. It also provides examples of proof-of-concept demos and highlights the potential of LLM as a general problem solver.Questo articolo discute del concetto di costruire agenti autonomi utilizzando LLM (large language model) come controller principale. L'articolo esplora i diversi componenti di un sistema di agenti alimentato da LLM, inclusa la pianificazione, la memoria e l'uso degli strumenti. Vengono forniti anche esempi di dimostrazioni di proof-of-concept e si evidenzia il potenziale di LLM come risolutore generale di problemi. 
Inoltre, vengono presentati approcci come Chain of Thought, Tree of Thoughts, LLM+P, ReAct e Reflexion che consentono agli agenti autonomi di pianificare, riflettere su se stessi e migliorare iterativamente.Questo articolo discute del concetto di costruire agenti autonomi utilizzando LLM (large language model) come controller principale. L'articolo esplora i diversi componenti di un sistema di agenti alimentato da LLM, inclusa la pianificazione, la memoria e l'uso degli strumenti. Vengono forniti anche esempi di dimostrazioni di proof-of-concept e si evidenzia il potenziale di LLM come risolutore generale di problemi. Inoltre, vengono presentati approcci come Chain of Thought, Tree of Thoughts, LLM+P, ReAct e Reflexion che consentono agli agenti autonomi di pianificare, riflettere su se stessi e migliorare iterativamente. Il nuovo contesto riguarda l'approccio Chain of Hindsight (CoH) che permette al modello di migliorare autonomamente i propri output attraverso un processo di apprendimento supervisionato. Viene anche presentato l'approccio Algorithm Distillation (AD) che applica lo stesso concetto alle traiettorie di apprendimento per compiti di reinforcement learning. Splitting and summarizing in a single chain[​](#splitting-and-summarizing-in-a-single-chain "Direct link to Splitting and summarizing in a single chain") --------------------------------------------------------------------------------------------------------------------------------------------------------- For convenience, we can wrap both the text splitting of our long document and summarizing in a single `AnalyzeDocumentsChain`. from langchain.chains import AnalyzeDocumentChainsummarize_document_chain = AnalyzeDocumentChain( combine_docs_chain=chain, text_splitter=text_splitter)summarize_document_chain.invoke(docs[0].page_content) **API Reference:**[AnalyzeDocumentChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.base.AnalyzeDocumentChain.html) Next steps[​](#next-steps "Direct link to Next steps") ------------------------------------------------------ We encourage you to check out the [how-to guides](/v0.2/docs/how_to/) for more detail on: * Built-in [document loaders](/v0.2/docs/how_to/#document-loaders) and [text-splitters](/v0.2/docs/how_to/#text-splitters) * Integrating various combine-document chains into a [RAG application](/v0.2/docs/tutorials/rag/) * Incorporating retrieval into a [chatbot](/v0.2/docs/how_to/chatbots_retrieval/) and other concepts. [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/tutorials/summarization.ipynb) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). [ Previous Build a Question/Answering system over SQL data ](/v0.2/docs/tutorials/sql_qa/)[ Next How-to guides ](/v0.2/docs/how_to/) * [Concepts](#concepts) * [Setup](#setup) * [Jupyter Notebook](#jupyter-notebook) * [Installation](#installation) * [LangSmith](#langsmith) * [Overview](#overview) * [Quickstart](#quickstart) * [Option 1. Stuff](#stuff) * [Go deeper](#go-deeper) * [Option 2. Map-Reduce](#map-reduce) * [Go deeper](#go-deeper-1) * [Option 3. Refine](#refine) * [Splitting and summarizing in a single chain](#splitting-and-summarizing-in-a-single-chain) * [Next steps](#next-steps)
https://python.langchain.com/v0.2/docs/tutorials/
* [](/v0.2/) * Tutorials On this page Tutorials ========= New to LangChain or to LLM app development in general? Read this material to quickly get up and running. Basics[​](#basics "Direct link to Basics") ------------------------------------------ * [Build a Simple LLM Application with LCEL](/v0.2/docs/tutorials/llm_chain/) * [Build a Chatbot](/v0.2/docs/tutorials/chatbot/) * [Build vector stores and retrievers](/v0.2/docs/tutorials/retrievers/) * [Build an Agent](/v0.2/docs/tutorials/agents/) Working with external knowledge[​](#working-with-external-knowledge "Direct link to Working with external knowledge") --------------------------------------------------------------------------------------------------------------------- * [Build a Retrieval Augmented Generation (RAG) Application](/v0.2/docs/tutorials/rag/) * [Build a Conversational RAG Application](/v0.2/docs/tutorials/qa_chat_history/) * [Build a Question/Answering system over SQL data](/v0.2/docs/tutorials/sql_qa/) * [Build a Query Analysis System](/v0.2/docs/tutorials/query_analysis/) * [Build a local RAG application](/v0.2/docs/tutorials/local_rag/) * [Build a Question Answering application over a Graph Database](/v0.2/docs/tutorials/graph/) * [Build a PDF ingestion and Question/Answering system](/v0.2/docs/tutorials/pdf_qa/) Specialized tasks[​](#specialized-tasks "Direct link to Specialized tasks") --------------------------------------------------------------------------- * [Build an Extraction Chain](/v0.2/docs/tutorials/extraction/) * [Generate synthetic data](/v0.2/docs/tutorials/data_generation/) * [Classify text into labels](/v0.2/docs/tutorials/classification/) * [Summarize text](/v0.2/docs/tutorials/summarization/) LangGraph[​](#langgraph "Direct link to LangGraph") --------------------------------------------------- LangGraph is an extension of LangChain aimed at building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. LangGraph documentation is currently hosted on a separate site. You can peruse [LangGraph tutorials here](https://langchain-ai.github.io/langgraph/tutorials/). LangSmith[​](#langsmith "Direct link to LangSmith") --------------------------------------------------- LangSmith allows you to closely trace, monitor and evaluate your LLM application. It seamlessly integrates with LangChain, and you can use it to inspect and debug individual steps of your chains as you build. LangSmith documentation is hosted on a separate site. You can peruse [LangSmith tutorials here](https://docs.smith.langchain.com/tutorials/). ### Evaluation[​](#evaluation "Direct link to Evaluation") LangSmith helps you evaluate the performance of your LLM applications. The below tutorial is a great way to get started: * [Evaluate your LLM application](https://docs.smith.langchain.com/tutorials/Developers/evaluation) More[​](#more "Direct link to More") ------------------------------------ For more tutorials, see our [cookbook section](https://github.com/langchain-ai/langchain/tree/master/cookbook). [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/tutorials/index.mdx) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). 
https://python.langchain.com/v0.2/docs/how_to/migrate_agent/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to migrate from legacy LangChain agents to LangGraph On this page How to migrate from legacy LangChain agents to LangGraph ======================================================== Here we focus on how to move from legacy LangChain agents to LangGraph agents. LangChain agents (the [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor) in particular) have multiple configuration parameters. In this notebook we will show how those parameters map to the LangGraph [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent). #### Prerequisites[​](#prerequisites "Direct link to Prerequisites") This how-to guide uses OpenAI as the LLM. Install the dependencies to run. %%capture --no-stderr%pip install -U langgraph langchain langchain-openai Basic Usage[​](#basic-usage "Direct link to Basic Usage") --------------------------------------------------------- For basic creation and usage of a tool-calling ReAct-style agent, the functionality is the same. First, let's define a model and tool(s), then we'll use those to create an agent. from langchain_core.tools import toolfrom langchain_openai import ChatOpenAImodel = ChatOpenAI(model="gpt-4o")@tooldef magic_function(input: int) -> int: """Applies a magic function to an input.""" return input + 2tools = [magic_function]query = "what is the value of magic_function(3)?" **API Reference:**[tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) For the LangChain [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor), we define a prompt with a placeholder for the agent's scratchpad. The agent can be invoked as follows: from langchain.agents import AgentExecutor, create_tool_calling_agentfrom langchain_core.prompts import ChatPromptTemplateprompt = ChatPromptTemplate.from_messages( [ ("system", "You are a helpful assistant"), ("human", "{input}"), # Placeholders fill up a **list** of messages ("placeholder", "{agent_scratchpad}"), ])agent = create_tool_calling_agent(model, tools, prompt)agent_executor = AgentExecutor(agent=agent, tools=tools)agent_executor.invoke({"input": query}) **API Reference:**[AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html) | [create\_tool\_calling\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.tool_calling_agent.base.create_tool_calling_agent.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) {'input': 'what is the value of magic_function(3)?', 'output': 'The value of `magic_function(3)` is 5.'} LangGraph's [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) manages a state that is defined by a list of messages. It will continue to process the list until there are no tool calls in the agent's output. To kick it off, we input a list of messages. The output will contain the entire state of the graph-- in this case, the conversation history. 
from langgraph.prebuilt import create_react_agentapp = create_react_agent(model, tools)messages = app.invoke({"messages": [("human", query)]}){ "input": query, "output": messages["messages"][-1].content,} {'input': 'what is the value of magic_function(3)?', 'output': 'The value of `magic_function(3)` is 5.'} message_history = messages["messages"]new_query = "Pardon?"messages = app.invoke({"messages": message_history + [("human", new_query)]}){ "input": new_query, "output": messages["messages"][-1].content,} {'input': 'Pardon?', 'output': 'The result of applying `magic_function` to the input 3 is 5.'} Prompt Templates[​](#prompt-templates "Direct link to Prompt Templates") ------------------------------------------------------------------------ With legacy LangChain agents you have to pass in a prompt template. You can use this to control the agent. With LangGraph [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent), by default there is no prompt. You can achieve similar control over the agent in a few ways: 1. Pass in a system message as input 2. Initialize the agent with a system message 3. Initialize the agent with a function to transform messages before passing to the model. Let's take a look at all of these below. We will pass in custom instructions to get the agent to respond in Spanish. First up, using AgentExecutor: prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a helpful assistant. Respond only in Spanish."), ("human", "{input}"), # Placeholders fill up a **list** of messages ("placeholder", "{agent_scratchpad}"), ])agent = create_tool_calling_agent(model, tools, prompt)agent_executor = AgentExecutor(agent=agent, tools=tools)agent_executor.invoke({"input": query}) {'input': 'what is the value of magic_function(3)?', 'output': 'El valor de `magic_function(3)` es 5.'} Now, let's pass a custom system message to [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent). This can either be a string or a LangChain SystemMessage. from langchain_core.messages import SystemMessagefrom langgraph.prebuilt import create_react_agentsystem_message = "You are a helpful assistant. Respond only in Spanish."# This could also be a SystemMessage object# system_message = SystemMessage(content="You are a helpful assistant. Respond only in Spanish.")app = create_react_agent(model, tools, messages_modifier=system_message)messages = app.invoke({"messages": [("user", query)]}) **API Reference:**[SystemMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html) We can also pass in an arbitrary function. This function should take in a list of messages and output a list of messages. We can do all types of arbitrary formatting of messages here. In this cases, let's just add a SystemMessage to the start of the list of messages. from langchain_core.messages import AnyMessagefrom langgraph.prebuilt import create_react_agentprompt = ChatPromptTemplate.from_messages( [ ("system", "You are a helpful assistant. Respond only in Spanish."), ("placeholder", "{messages}"), ])def _modify_messages(messages: list[AnyMessage]): return prompt.invoke({"messages": messages}).to_messages() + [ ("user", "Also say 'Pandamonium!' 
after the answer.") ]app = create_react_agent(model, tools, messages_modifier=_modify_messages)messages = app.invoke({"messages": [("human", query)]})print( { "input": query, "output": messages["messages"][-1].content, }) {'input': 'what is the value of magic_function(3)?', 'output': 'El valor de magic_function(3) es 5. ¡Pandamonium!'} Memory[​](#memory "Direct link to Memory") ------------------------------------------ With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could add chat [Memory](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.memory) so it can engage in a multi-turn conversation. from langchain.agents import AgentExecutor, create_tool_calling_agentfrom langchain_community.chat_message_histories import ChatMessageHistoryfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.runnables.history import RunnableWithMessageHistoryfrom langchain_core.tools import toolfrom langchain_openai import ChatOpenAImodel = ChatOpenAI(model="gpt-4o")memory = ChatMessageHistory(session_id="test-session")prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a helpful assistant."), # First put the history ("placeholder", "{chat_history}"), # Then the new input ("human", "{input}"), # Finally the scratchpad ("placeholder", "{agent_scratchpad}"), ])@tooldef magic_function(input: int) -> int: """Applies a magic function to an input.""" return input + 2tools = [magic_function]agent = create_tool_calling_agent(model, tools, prompt)agent_executor = AgentExecutor(agent=agent, tools=tools)agent_with_chat_history = RunnableWithMessageHistory( agent_executor, # This is needed because in most real world scenarios, a session id is needed # It isn't really used here because we are using a simple in memory ChatMessageHistory lambda session_id: memory, input_messages_key="input", history_messages_key="chat_history",)config = {"configurable": {"session_id": "test-session"}}print( agent_with_chat_history.invoke( {"input": "Hi, I'm polly! What's the output of magic_function of 3?"}, config )["output"])print("---")print(agent_with_chat_history.invoke({"input": "Remember my name?"}, config)["output"])print("---")print( agent_with_chat_history.invoke({"input": "what was that output again?"}, config)[ "output" ]) **API Reference:**[AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html) | [create\_tool\_calling\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.tool_calling_agent.base.create_tool_calling_agent.html) | [ChatMessageHistory](https://api.python.langchain.com/en/latest/chat_history/langchain_core.chat_history.ChatMessageHistory.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [RunnableWithMessageHistory](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) | [tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) Hi Polly! The output of the magic function for the input 3 is 5.---Yes, I remember your name, Polly! How can I assist you further?---The output of the magic function for the input 3 is 5. 
#### In LangGraph[​](#in-langgraph "Direct link to In LangGraph") Memory is just [persistence](https://langchain-ai.github.io/langgraph/how-tos/persistence/), aka [checkpointing](https://langchain-ai.github.io/langgraph/reference/checkpoints/). Add a `checkpointer` to the agent and you get chat memory for free. from langchain_core.messages import SystemMessagefrom langgraph.checkpoint import MemorySaver # an in-memory checkpointerfrom langgraph.prebuilt import create_react_agentsystem_message = "You are a helpful assistant."# This could also be a SystemMessage object# system_message = SystemMessage(content="You are a helpful assistant. Respond only in Spanish.")memory = MemorySaver()app = create_react_agent( model, tools, messages_modifier=system_message, checkpointer=memory)config = {"configurable": {"thread_id": "test-thread"}}print( app.invoke( { "messages": [ ("user", "Hi, I'm polly! What's the output of magic_function of 3?") ] }, config, )["messages"][-1].content)print("---")print( app.invoke({"messages": [("user", "Remember my name?")]}, config)["messages"][ -1 ].content)print("---")print( app.invoke({"messages": [("user", "what was that output again?")]}, config)[ "messages" ][-1].content) **API Reference:**[SystemMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html) Hi Polly! The output of the magic_function for the input 3 is 5.---Yes, your name is Polly!---The output of the magic_function for the input 3 was 5. Iterating through steps[​](#iterating-through-steps "Direct link to Iterating through steps") --------------------------------------------------------------------------------------------- With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could iterate over the steps using the [stream](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) (or async `astream`) methods or the [iter](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter) method. 
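The synchronous `stream` loop is shown below; for reference, the async variant looks the same apart from the `async for`. A minimal sketch (assumes the `agent_executor` and `query` defined in this section, run from an async context such as a notebook cell):

```python
# Hedged sketch of the async variant; `agent_executor` and `query` are the
# objects defined in the surrounding example.
async for step in agent_executor.astream({"input": query}):
    print(step)
```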
LangGraph supports stepwise iteration using [stream](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) from langchain.agents import AgentExecutor, create_tool_calling_agentfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.tools import toolfrom langchain_openai import ChatOpenAImodel = ChatOpenAI(model="gpt-4o")prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a helpful assistant."), ("human", "{input}"), # Placeholders fill up a **list** of messages ("placeholder", "{agent_scratchpad}"), ])@tooldef magic_function(input: int) -> int: """Applies a magic function to an input.""" return input + 2tools = [magic_function]agent = create_tool_calling_agent(model, tools, prompt=prompt)agent_executor = AgentExecutor(agent=agent, tools=tools)for step in agent_executor.stream({"input": query}): print(step) **API Reference:**[AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html) | [create\_tool\_calling\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.tool_calling_agent.base.create_tool_calling_agent.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) {'actions': [ToolAgentAction(tool='magic_function', tool_input={'input': 3}, log="\nInvoking: `magic_function` with `{'input': 3}`\n\n\n", message_log=[AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt', 'function': {'arguments': '{"input":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'finish_reason': 'tool_calls'}, id='run-c68fd76f-a3c3-4c3c-bfd7-748c171ed4b8', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt'}], tool_call_chunks=[{'name': 'magic_function', 'args': '{"input":3}', 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt', 'index': 0}])], tool_call_id='call_q9MgGFjqJbV2xSUX93WqxmOt')], 'messages': [AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt', 'function': {'arguments': '{"input":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'finish_reason': 'tool_calls'}, id='run-c68fd76f-a3c3-4c3c-bfd7-748c171ed4b8', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt'}], tool_call_chunks=[{'name': 'magic_function', 'args': '{"input":3}', 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt', 'index': 0}])]}{'steps': [AgentStep(action=ToolAgentAction(tool='magic_function', tool_input={'input': 3}, log="\nInvoking: `magic_function` with `{'input': 3}`\n\n\n", message_log=[AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt', 'function': {'arguments': '{"input":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'finish_reason': 'tool_calls'}, id='run-c68fd76f-a3c3-4c3c-bfd7-748c171ed4b8', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt'}], tool_call_chunks=[{'name': 'magic_function', 'args': '{"input":3}', 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt', 'index': 0}])], 
tool_call_id='call_q9MgGFjqJbV2xSUX93WqxmOt'), observation=5)], 'messages': [FunctionMessage(content='5', name='magic_function')]}{'output': 'The value of `magic_function(3)` is 5.', 'messages': [AIMessage(content='The value of `magic_function(3)` is 5.')]} #### In LangGraph[​](#in-langgraph-1 "Direct link to In LangGraph") In LangGraph, things are handled natively using [stream](https://langchain-ai.github.io/langgraph/reference/graphs/#langgraph.graph.graph.CompiledGraph.stream) or the asynchronous `astream` method. from langchain_core.messages import AnyMessagefrom langgraph.prebuilt import create_react_agentprompt = ChatPromptTemplate.from_messages( [ ("system", "You are a helpful assistant."), ("placeholder", "{messages}"), ])def _modify_messages(messages: list[AnyMessage]): return prompt.invoke({"messages": messages}).to_messages()app = create_react_agent(model, tools, messages_modifier=_modify_messages)for step in app.stream({"messages": [("human", query)]}, stream_mode="updates"): print(step) {'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_yTjXXibj76tyFyPRa1soLo0S', 'function': {'arguments': '{"input":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 70, 'total_tokens': 84}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-b275f314-c42e-4e77-9dec-5c23f7dbd53b-0', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_yTjXXibj76tyFyPRa1soLo0S'}])]}}{'tools': {'messages': [ToolMessage(content='5', name='magic_function', id='41c5f227-528d-4483-a313-b03b23b1d327', tool_call_id='call_yTjXXibj76tyFyPRa1soLo0S')]}}{'agent': {'messages': [AIMessage(content='The value of `magic_function(3)` is 5.', response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 93, 'total_tokens': 107}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'stop', 'logprobs': None}, id='run-0ef12b6e-415d-4758-9b62-5e5e1b350072-0')]}} `return_intermediate_steps`[​](#return_intermediate_steps "Direct link to return_intermediate_steps") ----------------------------------------------------------------------------------------------------- Setting this parameter on AgentExecutor allows users to access intermediate\_steps, which pairs agent actions (e.g., tool invocations) with their outcomes. agent_executor = AgentExecutor(agent=agent, tools=tools, return_intermediate_steps=True)result = agent_executor.invoke({"input": query})print(result["intermediate_steps"]) [(ToolAgentAction(tool='magic_function', tool_input={'input': 3}, log="\nInvoking: `magic_function` with `{'input': 3}`\n\n\n", message_log=[AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_ABI4hftfEdnVgKyfF6OzZbca', 'function': {'arguments': '{"input":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'finish_reason': 'tool_calls'}, id='run-837e794f-cfd8-40e0-8abc-4d98ced11b75', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_ABI4hftfEdnVgKyfF6OzZbca'}], tool_call_chunks=[{'name': 'magic_function', 'args': '{"input":3}', 'id': 'call_ABI4hftfEdnVgKyfF6OzZbca', 'index': 0}])], tool_call_id='call_ABI4hftfEdnVgKyfF6OzZbca'), 5)] By default the [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) in LangGraph appends all messages to the central state. 
Therefore, it is easy to see any intermediate steps by just looking at the full state. from langgraph.prebuilt import create_react_agentapp = create_react_agent(model, tools=tools)messages = app.invoke({"messages": [("human", query)]})messages {'messages': [HumanMessage(content='what is the value of magic_function(3)?', id='0f63e437-c4d8-4da9-b6f5-b293ebfe4a64'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_S96v28LlI6hNkQrNnIio0JPh', 'function': {'arguments': '{"input":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 64, 'total_tokens': 78}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-ffef7898-14b1-4537-ad90-7c000a8a5d25-0', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_S96v28LlI6hNkQrNnIio0JPh'}]), ToolMessage(content='5', name='magic_function', id='fbd9df4e-1dda-4d3e-9044-b001f7875476', tool_call_id='call_S96v28LlI6hNkQrNnIio0JPh'), AIMessage(content='The value of `magic_function(3)` is 5.', response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 87, 'total_tokens': 101}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'stop', 'logprobs': None}, id='run-e5d94c54-d9f4-45cd-be8e-a9101a8d88d6-0')]} `max_iterations`[​](#max_iterations "Direct link to max_iterations") -------------------------------------------------------------------- `AgentExecutor` implements a `max_iterations` parameter, whereas this is controlled via `recursion_limit` in LangGraph. Note that in AgentExecutor, an "iteration" includes a full turn of tool invocation and execution. In LangGraph, each step contributes to the recursion limit, so we will need to multiply by two (and add one) to get equivalent results. If the recursion limit is reached, LangGraph raises a specific exception type, that we can catch and manage similarly to AgentExecutor. @tooldef magic_function(input: str) -> str: """Applies a magic function to an input.""" return "Sorry, there was an error. Please try again."tools = [magic_function] prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a helpful assistant. Respond only in Spanish."), ("human", "{input}"), # Placeholders fill up a **list** of messages ("placeholder", "{agent_scratchpad}"), ])agent = create_tool_calling_agent(model, tools, prompt)agent_executor = AgentExecutor( agent=agent, tools=tools, verbose=True, max_iterations=3,)agent_executor.invoke({"input": query}) > Entering new AgentExecutor chain...Invoking: `magic_function` with `{'input': '3'}`Sorry, there was an error. Please try again.Invoking: `magic_function` with `{'input': '3'}`responded: Parece que hubo un error al intentar obtener el valor de `magic_function(3)`. Permíteme intentarlo de nuevo.Sorry, there was an error. Please try again.Aún no puedo obtener el valor de `magic_function(3)`. ¿Hay algo más en lo que pueda ayudarte?> Finished chain. {'input': 'what is the value of magic_function(3)?', 'output': 'Aún no puedo obtener el valor de `magic_function(3)`. 
¿Hay algo más en lo que pueda ayudarte?'} from langgraph.errors import GraphRecursionErrorfrom langgraph.prebuilt import create_react_agentRECURSION_LIMIT = 2 * 3 + 1app = create_react_agent(model, tools=tools)try: for chunk in app.stream( {"messages": [("human", query)]}, {"recursion_limit": RECURSION_LIMIT}, stream_mode="values", ): print(chunk["messages"][-1])except GraphRecursionError: print({"input": query, "output": "Agent stopped due to max iterations."}) ('human', 'what is the value of magic_function(3)?')content='' additional_kwargs={'tool_calls': [{'id': 'call_pFdKcCu5taDTtOOfX14vEDRp', 'function': {'arguments': '{"input":"3"}', 'name': 'magic_function'}, 'type': 'function'}]} response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 64, 'total_tokens': 78}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None} id='run-25836468-ba7e-43be-a7cf-76bba06a2a08-0' tool_calls=[{'name': 'magic_function', 'args': {'input': '3'}, 'id': 'call_pFdKcCu5taDTtOOfX14vEDRp'}]content='Sorry, there was an error. Please try again.' name='magic_function' id='1a08b883-9c7b-4969-9e9b-67ce64cdcb5f' tool_call_id='call_pFdKcCu5taDTtOOfX14vEDRp'content='It seems there was an error when trying to apply the magic function. Let me try again.' additional_kwargs={'tool_calls': [{'id': 'call_DA0lpDIkBFg2GHy4WsEcZG4K', 'function': {'arguments': '{"input":"3"}', 'name': 'magic_function'}, 'type': 'function'}]} response_metadata={'token_usage': {'completion_tokens': 34, 'prompt_tokens': 97, 'total_tokens': 131}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None} id='run-d571b774-0ea3-4e35-8b7d-f32932c3f3cc-0' tool_calls=[{'name': 'magic_function', 'args': {'input': '3'}, 'id': 'call_DA0lpDIkBFg2GHy4WsEcZG4K'}]content='Sorry, there was an error. Please try again.' name='magic_function' id='0b45787b-c82a-487f-9a5a-de129c30460f' tool_call_id='call_DA0lpDIkBFg2GHy4WsEcZG4K'content='It appears that there is a consistent issue when trying to apply the magic function to the input "3." This could be due to various reasons, such as the input not being in the correct format or an internal error.\n\nIf you have any other questions or if there\'s something else you\'d like to try, please let me know!' response_metadata={'token_usage': {'completion_tokens': 66, 'prompt_tokens': 153, 'total_tokens': 219}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'stop', 'logprobs': None} id='run-50a962e6-21b7-4327-8dea-8e2304062627-0' `max_execution_time`[​](#max_execution_time "Direct link to max_execution_time") -------------------------------------------------------------------------------- `AgentExecutor` implements a `max_execution_time` parameter, allowing users to abort a run that exceeds a total time limit. import time@tooldef magic_function(input: str) -> str: """Applies a magic function to an input.""" time.sleep(2.5) return "Sorry, there was an error. Please try again."tools = [magic_function]agent = create_tool_calling_agent(model, tools, prompt)agent_executor = AgentExecutor( agent=agent, tools=tools, max_execution_time=2, verbose=True,)agent_executor.invoke({"input": query}) > Entering new AgentExecutor chain...Invoking: `magic_function` with `{'input': '3'}`Sorry, there was an error. Please try again.> Finished chain. 
{'input': 'what is the value of magic_function(3)?', 'output': 'Agent stopped due to max iterations.'} With LangGraph's react agent, you can control timeouts on two levels. You can set a `step_timeout` to bound each **step**: from langgraph.prebuilt import create_react_agentapp = create_react_agent(model, tools=tools)# Set the max timeout for each step hereapp.step_timeout = 2try: for chunk in app.stream({"messages": [("human", query)]}): print(chunk) print("------")except TimeoutError: print({"input": query, "output": "Agent stopped due to max iterations."}) {'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_HaQkeCwD5QskzJzFixCBacZ4', 'function': {'arguments': '{"input":"3"}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 64, 'total_tokens': 78}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-596c9200-771f-436d-8576-72fcb81620f1-0', tool_calls=[{'name': 'magic_function', 'args': {'input': '3'}, 'id': 'call_HaQkeCwD5QskzJzFixCBacZ4'}])]}}------{'input': 'what is the value of magic_function(3)?', 'output': 'Agent stopped due to max iterations.'} The other way to set a single max timeout for an entire run is to directly use the python stdlib [asyncio](https://docs.python.org/3/library/asyncio.html) library. import asynciofrom langgraph.prebuilt import create_react_agentapp = create_react_agent(model, tools=tools)async def stream(app, inputs): async for chunk in app.astream({"messages": [("human", query)]}): print(chunk) print("------")try: task = asyncio.create_task(stream(app, {"messages": [("human", query)]})) await asyncio.wait_for(task, timeout=3)except TimeoutError: print("Task Cancelled.") {'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_4agJXUHtmHrOOMogjF6ZuzAv', 'function': {'arguments': '{"input":"3"}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 64, 'total_tokens': 78}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-a1c77db7-405f-43d9-8d57-751f2ca1a58c-0', tool_calls=[{'name': 'magic_function', 'args': {'input': '3'}, 'id': 'call_4agJXUHtmHrOOMogjF6ZuzAv'}])]}}------Task Cancelled. `early_stopping_method`[​](#early_stopping_method "Direct link to early_stopping_method") ----------------------------------------------------------------------------------------- With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could configure an [early\_stopping\_method](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.early_stopping_method) to either return a string saying "Agent stopped due to iteration limit or time limit." (`"force"`) or prompt the LLM a final time to respond (`"generate"`). 
from langchain.agents import AgentExecutor, create_tool_calling_agentfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.tools import toolfrom langchain_openai import ChatOpenAImodel = ChatOpenAI(model="gpt-4o")prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a helpful assistant."), ("human", "{input}"), # Placeholders fill up a **list** of messages ("placeholder", "{agent_scratchpad}"), ])@tooldef magic_function(input: int) -> int: """Applies a magic function to an input.""" return "Sorry there was an error, please try again."tools = [magic_function]agent = create_tool_calling_agent(model, tools, prompt=prompt)agent_executor = AgentExecutor( agent=agent, tools=tools, early_stopping_method="force", max_iterations=1)result = agent_executor.invoke({"input": query})print("Output with early_stopping_method='force':")print(result["output"]) **API Reference:**[AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html) | [create\_tool\_calling\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.tool_calling_agent.base.create_tool_calling_agent.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) Output with early_stopping_method='force':Agent stopped due to max iterations. #### In LangGraph[​](#in-langgraph-2 "Direct link to In LangGraph") In LangGraph, you can explicitly handle the response behavior outside the agent, since the full state can be accessed. from langgraph.errors import GraphRecursionErrorfrom langgraph.prebuilt import create_react_agentRECURSION_LIMIT = 2 * 1 + 1app = create_react_agent(model, tools=tools)try: for chunk in app.stream( {"messages": [("human", query)]}, {"recursion_limit": RECURSION_LIMIT}, stream_mode="values", ): print(chunk["messages"][-1])except GraphRecursionError: print({"input": query, "output": "Agent stopped due to max iterations."}) ('human', 'what is the value of magic_function(3)?')content='' additional_kwargs={'tool_calls': [{'id': 'call_bTURmOn9C8zslmn0kMFeykIn', 'function': {'arguments': '{"input":3}', 'name': 'magic_function'}, 'type': 'function'}]} response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 64, 'total_tokens': 78}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None} id='run-0844a504-7e6b-4ea6-a069-7017e38121ee-0' tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_bTURmOn9C8zslmn0kMFeykIn'}]content='Sorry there was an error, please try again.' 
name='magic_function' id='00d5386f-eb23-4628-9a29-d9ce6a7098cc' tool_call_id='call_bTURmOn9C8zslmn0kMFeykIn'content='' additional_kwargs={'tool_calls': [{'id': 'call_JYqvvvWmXow2u012DuPoDHFV', 'function': {'arguments': '{"input":3}', 'name': 'magic_function'}, 'type': 'function'}]} response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 96, 'total_tokens': 110}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None} id='run-b73b1b1c-c829-4348-98cd-60b315c85448-0' tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_JYqvvvWmXow2u012DuPoDHFV'}]{'input': 'what is the value of magic_function(3)?', 'output': 'Agent stopped due to max iterations.'} `trim_intermediate_steps`[​](#trim_intermediate_steps "Direct link to trim_intermediate_steps") ----------------------------------------------------------------------------------------------- With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor), you could trim the intermediate steps of long-running agents using [trim\_intermediate\_steps](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.trim_intermediate_steps), which is either an integer (indicating the agent should keep the last N steps) or a custom function. For instance, we could trim the value so the agent only sees the most recent intermediate step. from langchain.agents import AgentExecutor, create_tool_calling_agentfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.tools import toolfrom langchain_openai import ChatOpenAImodel = ChatOpenAI(model="gpt-4o")prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a helpful assistant."), ("human", "{input}"), # Placeholders fill up a **list** of messages ("placeholder", "{agent_scratchpad}"), ])magic_step_num = 1@tooldef magic_function(input: int) -> int: """Applies a magic function to an input.""" global magic_step_num print(f"Call number: {magic_step_num}") magic_step_num += 1 return input + magic_step_numtools = [magic_function]agent = create_tool_calling_agent(model, tools, prompt=prompt)def trim_steps(steps: list): # Let's give the agent amnesia return []agent_executor = AgentExecutor( agent=agent, tools=tools, trim_intermediate_steps=trim_steps)query = "Call the magic function 4 times in sequence with the value 3. 
You cannot call it multiple times at once."for step in agent_executor.stream({"input": query}): pass **API Reference:**[AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html) | [create\_tool\_calling\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.tool_calling_agent.base.create_tool_calling_agent.html) | [ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) Call number: 1Call number: 2Call number: 3Call number: 4Call number: 5Call number: 6Call number: 7Call number: 8Call number: 9Call number: 10Call number: 11Call number: 12Call number: 13Call number: 14``````outputStopping agent prematurely due to triggering stop condition``````outputCall number: 15 #### In LangGraph[​](#in-langgraph-3 "Direct link to In LangGraph") We can use the [`messages_modifier`](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) just as before when passing in [prompt templates](#prompt-templates). from langchain_core.messages import AnyMessagefrom langgraph.errors import GraphRecursionErrorfrom langgraph.prebuilt import create_react_agentmagic_step_num = 1@tooldef magic_function(input: int) -> int: """Applies a magic function to an input.""" global magic_step_num print(f"Call number: {magic_step_num}") magic_step_num += 1 return input + magic_step_numtools = [magic_function]def _modify_messages(messages: list[AnyMessage]): # Give the agent amnesia, only keeping the original user query return [("system", "You are a helpful assistant"), messages[0]]app = create_react_agent(model, tools, messages_modifier=_modify_messages)try: for step in app.stream({"messages": [("human", query)]}, stream_mode="updates"): passexcept GraphRecursionError as e: print("Stopping agent prematurely due to triggering stop condition") Call number: 1Call number: 2Call number: 3Call number: 4Call number: 5Call number: 6Call number: 7Call number: 8Call number: 9Call number: 10Call number: 11Call number: 12Stopping agent prematurely due to triggering stop condition
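To wrap up, the pieces discussed on this page compose naturally. A consolidated sketch (assuming the `model` and `tools` objects from the examples above) that combines a system message via `messages_modifier`, chat memory via a checkpointer, and an iteration cap via `recursion_limit`:

```python
# Consolidated sketch of the LangGraph features covered above. Assumes the
# `model` and `tools` objects defined earlier on this page.
from langgraph.checkpoint import MemorySaver
from langgraph.errors import GraphRecursionError
from langgraph.prebuilt import create_react_agent

app = create_react_agent(
    model,
    tools,
    messages_modifier="You are a helpful assistant.",  # system message
    checkpointer=MemorySaver(),                        # chat memory
)
# recursion_limit plays the role of max_iterations (2 * iterations + 1).
config = {"configurable": {"thread_id": "demo-thread"}, "recursion_limit": 2 * 3 + 1}
try:
    result = app.invoke(
        {"messages": [("human", "what is the value of magic_function(3)?")]}, config
    )
    print(result["messages"][-1].content)
except GraphRecursionError:
    print("Agent stopped due to max iterations.")
```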
https://python.langchain.com/v0.2/docs/how_to/multimodal_inputs/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to pass multimodal data directly to models On this page How to pass multimodal data directly to models ============================================== Here we demonstrate how to pass multimodal input directly to models. We currently expect all input to be passed in the same format as [OpenAI expects](https://platform.openai.com/docs/guides/vision). For other model providers that support multimodal input, we have added logic inside the class to convert to the expected format. In this example we will ask a model to describe an image. image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" from langchain_core.messages import HumanMessagefrom langchain_openai import ChatOpenAImodel = ChatOpenAI(model="gpt-4o") **API Reference:**[HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) The most commonly supported way to pass in images is to pass it in as a byte string. This should work for most model integrations. import base64import httpximage_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8") message = HumanMessage( content=[ {"type": "text", "text": "describe the weather in this image"}, { "type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_data}"}, }, ],)response = model.invoke([message])print(response.content) The weather in the image appears to be clear and pleasant. The sky is mostly blue with scattered, light clouds, suggesting a sunny day with minimal cloud cover. There is no indication of rain or strong winds, and the overall scene looks bright and calm. The lush green grass and clear visibility further indicate good weather conditions. We can feed the image URL directly in a content block of type "image\_url". Note that only some model providers support this. message = HumanMessage( content=[ {"type": "text", "text": "describe the weather in this image"}, {"type": "image_url", "image_url": {"url": image_url}}, ],)response = model.invoke([message])print(response.content) The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered clouds, suggesting good visibility and a likely pleasant temperature. The bright sunlight is casting distinct shadows on the grass and vegetation, indicating it is likely daytime, possibly late morning or early afternoon. The overall ambiance suggests a warm and inviting day, suitable for outdoor activities. We can also pass in multiple images. message = HumanMessage( content=[ {"type": "text", "text": "are these two images the same?"}, {"type": "image_url", "image_url": {"url": image_url}}, {"type": "image_url", "image_url": {"url": image_url}}, ],)response = model.invoke([message])print(response.content) Yes, the two images are the same. They both depict a wooden boardwalk extending through a grassy field under a blue sky with light clouds. The scenery, lighting, and composition are identical. Tool calls[​](#tool-calls "Direct link to Tool calls") ------------------------------------------------------ Some multimodal models support [tool calling](/v0.2/docs/concepts/#functiontool-calling) features as well. 
To call tools using such models, simply bind tools to them in the [usual way](/v0.2/docs/how_to/tool_calling/), and invoke the model using content blocks of the desired type (e.g., containing image data). from typing import Literalfrom langchain_core.tools import tool@tooldef weather_tool(weather: Literal["sunny", "cloudy", "rainy"]) -> None: """Describe the weather""" passmodel_with_tools = model.bind_tools([weather_tool])message = HumanMessage( content=[ {"type": "text", "text": "describe the weather in this image"}, {"type": "image_url", "image_url": {"url": image_url}}, ],)response = model_with_tools.invoke([message])print(response.tool_calls) **API Reference:**[tool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) [{'name': 'weather_tool', 'args': {'weather': 'sunny'}, 'id': 'call_BSX4oq4SKnLlp2WlzDhToHBr'}]
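Finally, the byte-string approach shown at the top of this page works the same way for local files: read the bytes, base64-encode them, and place them in a `data:` URL. A minimal sketch (the file path is hypothetical):

```python
import base64

from langchain_core.messages import HumanMessage

# Hypothetical local path; any JPEG on disk works the same way.
with open("./boardwalk.jpg", "rb") as f:
    image_data = base64.b64encode(f.read()).decode("utf-8")

message = HumanMessage(
    content=[
        {"type": "text", "text": "describe the weather in this image"},
        {
            "type": "image_url",
            "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
        },
    ],
)
response = model.invoke([message])  # `model` is the ChatOpenAI instance from above
print(response.content)
```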
https://python.langchain.com/v0.2/docs/tutorials/query_analysis/
* [](/v0.2/) * [Tutorials](/v0.2/docs/tutorials/) * Build a Query Analysis System On this page Build a Query Analysis System ============================= Prerequisites This guide assumes familiarity with the following concepts: * [Document loaders](/v0.2/docs/concepts/#document-loaders) * [Chat models](/v0.2/docs/concepts/#chat-models) * [Embeddings](/v0.2/docs/concepts/#embedding-models) * [Vector stores](/v0.2/docs/concepts/#vector-stores) * [Retrieval](/v0.2/docs/concepts/#retrieval) This page will show how to use query analysis in a basic end-to-end example. This will cover creating a simple search engine, showing a failure mode that occurs when passing a raw user question to that search, and then an example of how query analysis can help address that issue. There are MANY different query analysis techniques and this end-to-end example will not show all of them. For the purpose of this example, we will do retrieval over the LangChain YouTube videos. Setup[​](#setup "Direct link to Setup") --------------------------------------- #### Install dependencies[​](#install-dependencies "Direct link to Install dependencies") # %pip install -qU langchain langchain-community langchain-openai youtube-transcript-api pytube langchain-chroma #### Set environment variables[​](#set-environment-variables "Direct link to Set environment variables") We'll use OpenAI in this example: import getpassimport osos.environ["OPENAI_API_KEY"] = getpass.getpass()# Optional, uncomment to trace runs with LangSmith. Sign up here: https://smith.langchain.com.# os.environ["LANGCHAIN_TRACING_V2"] = "true"# os.environ["LANGCHAIN_API_KEY"] = getpass.getpass() ### Load documents[​](#load-documents "Direct link to Load documents") We can use the `YouTubeLoader` to load transcripts of a few LangChain videos: from langchain_community.document_loaders import YoutubeLoaderurls = [ "https://www.youtube.com/watch?v=HAn9vnJy6S4", "https://www.youtube.com/watch?v=dA1cHGACXCo", "https://www.youtube.com/watch?v=ZcEMLz27sL4", "https://www.youtube.com/watch?v=hvAPnpSfSGo", "https://www.youtube.com/watch?v=EhlPDL4QrWY", "https://www.youtube.com/watch?v=mmBo8nlu2j0", "https://www.youtube.com/watch?v=rQdibOsL1ps", "https://www.youtube.com/watch?v=28lC4fqukoc", "https://www.youtube.com/watch?v=es-9MgxB-uc", "https://www.youtube.com/watch?v=wLRHwKuKvOE", "https://www.youtube.com/watch?v=ObIltMaRJvY", "https://www.youtube.com/watch?v=DjuXACWYkkU", "https://www.youtube.com/watch?v=o7C9ld6Ln-M",]docs = []for url in urls: docs.extend(YoutubeLoader.from_youtube_url(url, add_video_info=True).load()) **API Reference:**[YoutubeLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.youtube.YoutubeLoader.html) import datetime# Add some additional metadata: what year the video was publishedfor doc in docs: doc.metadata["publish_year"] = int( datetime.datetime.strptime( doc.metadata["publish_date"], "%Y-%m-%d %H:%M:%S" ).strftime("%Y") ) Here are the titles of the videos we've loaded: [doc.metadata["title"] for doc in docs] ['OpenGPTs', 'Building a web RAG chatbot: using LangChain, Exa (prev. 
Metaphor), LangSmith, and Hosted Langserve', 'Streaming Events: Introducing a new `stream_events` method', 'LangGraph: Multi-Agent Workflows', 'Build and Deploy a RAG app with Pinecone Serverless', 'Auto-Prompt Builder (with Hosted LangServe)', 'Build a Full Stack RAG App With TypeScript', 'Getting Started with Multi-Modal LLMs', 'SQL Research Assistant', 'Skeleton-of-Thought: Building a New Template from Scratch', 'Benchmarking RAG over LangChain Docs', 'Building a Research Assistant from Scratch', 'LangServe and LangChain Templates Webinar'] Here's the metadata associated with each video. We can see that each document also has a title, view count, publication date, and length: docs[0].metadata {'source': 'HAn9vnJy6S4', 'title': 'OpenGPTs', 'description': 'Unknown', 'view_count': 7210, 'thumbnail_url': 'https://i.ytimg.com/vi/HAn9vnJy6S4/hq720.jpg', 'publish_date': '2024-01-31 00:00:00', 'length': 1530, 'author': 'LangChain', 'publish_year': 2024} And here's a sample from a document's contents: docs[0].page_content[:500] "hello today I want to talk about open gpts open gpts is a project that we built here at linkchain uh that replicates the GPT store in a few ways so it creates uh end user-facing friendly interface to create different Bots and these Bots can have access to different tools and they can uh be given files to retrieve things over and basically it's a way to create a variety of bots and expose the configuration of these Bots to end users it's all open source um it can be used with open AI it can be us" ### Indexing documents[​](#indexing-documents "Direct link to Indexing documents") Whenever we perform retrieval we need to create an index of documents that we can query. We'll use a vector store to index our documents, and we'll chunk them first to make our retrievals more concise and precise: from langchain_chroma import Chromafrom langchain_openai import OpenAIEmbeddingsfrom langchain_text_splitters import RecursiveCharacterTextSplittertext_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)chunked_docs = text_splitter.split_documents(docs)embeddings = OpenAIEmbeddings(model="text-embedding-3-small")vectorstore = Chroma.from_documents( chunked_docs, embeddings,) **API Reference:**[OpenAIEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html) | [RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html) Retrieval without query analysis[​](#retrieval-without-query-analysis "Direct link to Retrieval without query analysis") ------------------------------------------------------------------------------------------------------------------------ We can perform similarity search on a user question directly to find chunks relevant to the question: search_results = vectorstore.similarity_search("how do I build a RAG agent")print(search_results[0].metadata["title"])print(search_results[0].page_content[:500]) Build and Deploy a RAG app with Pinecone Serverlesshi this is Lance from the Lang chain team and today we're going to be building and deploying a rag app using pine con serval list from scratch so we're going to kind of walk through all the code required to do this and I'll use these slides as kind of a guide to kind of lay the the ground work um so first what is rag so under capoy has this pretty nice visualization that shows LMS as a kernel of a new kind of operating system and of course one of the core 
components of our operating system is th This works pretty well! Our first result is quite relevant to the question. What if we wanted to search for results from a specific time period? search_results = vectorstore.similarity_search("videos on RAG published in 2023")print(search_results[0].metadata["title"])print(search_results[0].metadata["publish_date"])print(search_results[0].page_content[:500]) OpenGPTs2024-01-31hardcoded that it will always do a retrieval step here the assistant decides whether to do a retrieval step or not sometimes this is good sometimes this is bad sometimes it you don't need to do a retrieval step when I said hi it didn't need to call it tool um but other times you know the the llm might mess up and not realize that it needs to do a retrieval step and so the rag bot will always do a retrieval step so it's more focused there because this is also a simpler architecture so it's always Our first result is from 2024 (despite us asking for videos from 2023), and not very relevant to the input. Since we're just searching against document contents, there's no way for the results to be filtered on any document attributes. This is just one failure mode that can arise. Let's now take a look at how a basic form of query analysis can fix it! Query analysis[​](#query-analysis "Direct link to Query analysis") ------------------------------------------------------------------ We can use query analysis to improve the results of retrieval. This will involve defining a **query schema** that contains some date filters and use a function-calling model to convert a user question into a structured queries. ### Query schema[​](#query-schema "Direct link to Query schema") In this case we'll have explicit min and max attributes for publication date so that it can be filtered on. from typing import Optionalfrom langchain_core.pydantic_v1 import BaseModel, Fieldclass Search(BaseModel): """Search over a database of tutorial videos about a software library.""" query: str = Field( ..., description="Similarity search query applied to video transcripts.", ) publish_year: Optional[int] = Field(None, description="Year video was published") ### Query generation[​](#query-generation "Direct link to Query generation") To convert user questions to structured queries we'll make use of OpenAI's tool-calling API. Specifically we'll use the new [ChatModel.with\_structured\_output()](/v0.2/docs/how_to/structured_output/) constructor to handle passing the schema to the model and parsing the output. from langchain_core.prompts import ChatPromptTemplatefrom langchain_core.runnables import RunnablePassthroughfrom langchain_openai import ChatOpenAIsystem = """You are an expert at converting user questions into database queries. \You have access to a database of tutorial videos about a software library for building LLM-powered applications. 
\Given a question, return a list of database queries optimized to retrieve the most relevant results.If there are acronyms or words you are not familiar with, do not try to rephrase them."""prompt = ChatPromptTemplate.from_messages( [ ("system", system), ("human", "{question}"), ])llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)structured_llm = llm.with_structured_output(Search)query_analyzer = {"question": RunnablePassthrough()} | prompt | structured_llm **API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) | [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)

```
/Users/bagatur/langchain/libs/core/langchain_core/_api/beta_decorator.py:86: LangChainBetaWarning: The function `with_structured_output` is in beta. It is actively being worked on, so the API may change. warn_beta(
```

Let's see what queries our analyzer generates for the questions we searched earlier:

```python
query_analyzer.invoke("how do I build a RAG agent")
```

```
Search(query='build RAG agent', publish_year=None)
```

```python
query_analyzer.invoke("videos on RAG published in 2023")
```

```
Search(query='RAG', publish_year=2023)
```

Retrieval with query analysis[​](#retrieval-with-query-analysis "Direct link to Retrieval with query analysis")
---------------------------------------------------------------------------------------------------------------

Our query analysis looks pretty good; now let's try using our generated queries to actually perform retrieval.

**Note:** in our example, we specified `tool_choice="Search"`. This will force the LLM to call one - and only one - tool, meaning that we will always have one optimized query to look up. Note that this is not always the case - see other guides for how to deal with situations when no - or multiple - optimized queries are returned.

```python
from typing import List

from langchain_core.documents import Document
```

**API Reference:**[Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html)

```python
def retrieval(search: Search) -> List[Document]:
    if search.publish_year is not None:
        # This is syntax specific to Chroma,
        # the vector database we are using.
        _filter = {"publish_year": {"$eq": search.publish_year}}
    else:
        _filter = None
    return vectorstore.similarity_search(search.query, filter=_filter)
```

```python
retrieval_chain = query_analyzer | retrieval
```

We can now run this chain on the problematic input from before, and see that it yields only results from that year!

```python
results = retrieval_chain.invoke("RAG tutorial published in 2023")
[(doc.metadata["title"], doc.metadata["publish_date"]) for doc in results]
```

```
[('Getting Started with Multi-Modal LLMs', '2023-12-20 00:00:00'),
 ('LangServe and LangChain Templates Webinar', '2023-11-02 00:00:00'),
 ('Getting Started with Multi-Modal LLMs', '2023-12-20 00:00:00'),
 ('Building a Research Assistant from Scratch', '2023-11-16 00:00:00')]
```
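The `Search` schema above exposes a single `publish_year`. If you want the explicit min and max date attributes mentioned earlier, one possible extension is sketched below. This is illustrative only: the `SearchWithRange` name, its field names, and the filter construction are our own, and the sketch reuses the `vectorstore`, `prompt`, `llm`, and `RunnablePassthrough` objects defined above. Chroma combines metadata conditions with its `$and`, `$gte`, and `$lte` operators.

```python
from typing import List, Optional

from langchain_core.documents import Document
from langchain_core.pydantic_v1 import BaseModel, Field


class SearchWithRange(BaseModel):
    """Search over tutorial video transcripts, optionally bounded by publish year."""

    query: str = Field(..., description="Similarity search query applied to video transcripts.")
    min_publish_year: Optional[int] = Field(None, description="Earliest publish year, inclusive.")
    max_publish_year: Optional[int] = Field(None, description="Latest publish year, inclusive.")


def retrieval_with_range(search: SearchWithRange) -> List[Document]:
    # Build a Chroma metadata filter from whichever bounds the model filled in.
    conditions = []
    if search.min_publish_year is not None:
        conditions.append({"publish_year": {"$gte": search.min_publish_year}})
    if search.max_publish_year is not None:
        conditions.append({"publish_year": {"$lte": search.max_publish_year}})
    if not conditions:
        _filter = None
    elif len(conditions) == 1:
        _filter = conditions[0]
    else:
        _filter = {"$and": conditions}
    return vectorstore.similarity_search(search.query, filter=_filter)


# range_analyzer = {"question": RunnablePassthrough()} | prompt | llm.with_structured_output(SearchWithRange)
# range_chain = range_analyzer | retrieval_with_range
# range_chain.invoke("RAG videos from 2023 or earlier")
```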
https://python.langchain.com/v0.2/docs/how_to/
* [](/v0.2/) * How-to guides On this page How-to guides ============= Here you’ll find answers to “How do I….?” types of questions. These guides are _goal-oriented_ and _concrete_; they're meant to help you complete a specific task. For conceptual explanations see the [Conceptual guide](/v0.2/docs/concepts/). For end-to-end walkthroughs see [Tutorials](/v0.2/docs/tutorials/). For comprehensive descriptions of every class and function see the [API Reference](https://api.python.langchain.com/en/latest/). Installation[​](#installation "Direct link to Installation") ------------------------------------------------------------ * [How to: install LangChain packages](/v0.2/docs/how_to/installation/) * [How to: use LangChain with different Pydantic versions](/v0.2/docs/how_to/pydantic_compatibility/) Key features[​](#key-features "Direct link to Key features") ------------------------------------------------------------ This highlights functionality that is core to using LangChain. * [How to: return structured data from a model](/v0.2/docs/how_to/structured_output/) * [How to: use a model to call tools](/v0.2/docs/how_to/tool_calling/) * [How to: stream runnables](/v0.2/docs/how_to/streaming/) * [How to: debug your LLM apps](/v0.2/docs/how_to/debugging/) LangChain Expression Language (LCEL)[​](#langchain-expression-language-lcel "Direct link to LangChain Expression Language (LCEL)") ---------------------------------------------------------------------------------------------------------------------------------- [LangChain Expression Language](/v0.2/docs/concepts/#langchain-expression-language-lcel) is a way to create arbitrary custom chains. It is built on the [Runnable](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) protocol. [**LCEL cheatsheet**](/v0.2/docs/how_to/lcel_cheatsheet/): For a quick overview of how to use the main LCEL primitives. * [How to: chain runnables](/v0.2/docs/how_to/sequence/) * [How to: stream runnables](/v0.2/docs/how_to/streaming/) * [How to: invoke runnables in parallel](/v0.2/docs/how_to/parallel/) * [How to: add default invocation args to runnables](/v0.2/docs/how_to/binding/) * [How to: turn any function into a runnable](/v0.2/docs/how_to/functions/) * [How to: pass through inputs from one chain step to the next](/v0.2/docs/how_to/passthrough/) * [How to: configure runnable behavior at runtime](/v0.2/docs/how_to/configure/) * [How to: add message history (memory) to a chain](/v0.2/docs/how_to/message_history/) * [How to: route between sub-chains](/v0.2/docs/how_to/routing/) * [How to: create a dynamic (self-constructing) chain](/v0.2/docs/how_to/dynamic_chain/) * [How to: inspect runnables](/v0.2/docs/how_to/inspect/) * [How to: add fallbacks to a runnable](/v0.2/docs/how_to/fallbacks/) Components[​](#components "Direct link to Components") ------------------------------------------------------ These are the core building blocks you can use when building applications. ### Prompt templates[​](#prompt-templates "Direct link to Prompt templates") [Prompt Templates](/v0.2/docs/concepts/#prompt-templates) are responsible for formatting user input into a format that can be passed to a language model. 
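As a quick, self-contained illustration (the template text here is arbitrary), a chat prompt template turns a dictionary of variables into a list of messages ready for a chat model:

```python
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant that answers in {language}."),
        ("human", "{question}"),
    ]
)

# .invoke() returns a ChatPromptValue; .to_messages() yields the formatted message list.
messages = prompt.invoke({"language": "French", "question": "What is LangChain?"}).to_messages()
```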
* [How to: use few shot examples](/v0.2/docs/how_to/few_shot_examples/) * [How to: use few shot examples in chat models](/v0.2/docs/how_to/few_shot_examples_chat/) * [How to: partially format prompt templates](/v0.2/docs/how_to/prompts_partial/) * [How to: compose prompts together](/v0.2/docs/how_to/prompts_composition/) ### Example selectors[​](#example-selectors "Direct link to Example selectors") [Example Selectors](/v0.2/docs/concepts/#example-selectors) are responsible for selecting the correct few shot examples to pass to the prompt. * [How to: use example selectors](/v0.2/docs/how_to/example_selectors/) * [How to: select examples by length](/v0.2/docs/how_to/example_selectors_length_based/) * [How to: select examples by semantic similarity](/v0.2/docs/how_to/example_selectors_similarity/) * [How to: select examples by semantic ngram overlap](/v0.2/docs/how_to/example_selectors_ngram/) * [How to: select examples by maximal marginal relevance](/v0.2/docs/how_to/example_selectors_mmr/) ### Chat models[​](#chat-models "Direct link to Chat models") [Chat Models](/v0.2/docs/concepts/#chat-models) are newer forms of language models that take messages in and output a message. * [How to: do function/tool calling](/v0.2/docs/how_to/tool_calling/) * [How to: get models to return structured output](/v0.2/docs/how_to/structured_output/) * [How to: cache model responses](/v0.2/docs/how_to/chat_model_caching/) * [How to: get log probabilities](/v0.2/docs/how_to/logprobs/) * [How to: create a custom chat model class](/v0.2/docs/how_to/custom_chat_model/) * [How to: stream a response back](/v0.2/docs/how_to/chat_streaming/) * [How to: track token usage](/v0.2/docs/how_to/chat_token_usage_tracking/) * [How to: track response metadata across providers](/v0.2/docs/how_to/response_metadata/) * [How to: let your end users choose their model](/v0.2/docs/how_to/chat_models_universal_init/) * [How to: use chat model to call tools](/v0.2/docs/how_to/tool_calling/) * [How to: stream tool calls](/v0.2/docs/how_to/tool_streaming/) * [How to: few shot prompt tool behavior](/v0.2/docs/how_to/tools_few_shot/) * [How to: bind model-specific formated tools](/v0.2/docs/how_to/tools_model_specific/) * [How to: force specific tool call](/v0.2/docs/how_to/tool_choice/) * [How to: init any model in one line](/v0.2/docs/how_to/chat_models_universal_init/) ### Messages[​](#messages "Direct link to Messages") [Messages](/v0.2/docs/concepts/#messages) are the input and output of chat models. They have some `content` and a `role`, which describes the source of the message. * [How to: trim messages](/v0.2/docs/how_to/trim_messages/) * [How to: filter messages](/v0.2/docs/how_to/filter_messages/) * [How to: merge consecutive messages of the same type](/v0.2/docs/how_to/merge_message_runs/) ### LLMs[​](#llms "Direct link to LLMs") What LangChain calls [LLMs](/v0.2/docs/concepts/#llms) are older forms of language models that take a string in and output a string. * [How to: cache model responses](/v0.2/docs/how_to/llm_caching/) * [How to: create a custom LLM class](/v0.2/docs/how_to/custom_llm/) * [How to: stream a response back](/v0.2/docs/how_to/streaming_llm/) * [How to: track token usage](/v0.2/docs/how_to/llm_token_usage_tracking/) * [How to: work with local LLMs](/v0.2/docs/how_to/local_llms/) ### Output parsers[​](#output-parsers "Direct link to Output parsers") [Output Parsers](/v0.2/docs/concepts/#output-parsers) are responsible for taking the output of an LLM and parsing into more structured format. 
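For a quick sense of what that looks like (a minimal sketch with hard-coded inputs; in practice the parser usually sits at the end of a chain such as `prompt | llm | JsonOutputParser()`):

```python
from langchain_core.messages import AIMessage
from langchain_core.output_parsers import JsonOutputParser, StrOutputParser

# Output parsers are runnables: they accept a message (or string) and return structured output.
StrOutputParser().invoke(AIMessage(content="hello"))                     # -> 'hello'
JsonOutputParser().invoke('{"setup": "Why?", "punchline": "Because."}')  # -> {'setup': 'Why?', 'punchline': 'Because.'}
```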
* [How to: use output parsers to parse an LLM response into structured format](/v0.2/docs/how_to/output_parser_structured/) * [How to: parse JSON output](/v0.2/docs/how_to/output_parser_json/) * [How to: parse XML output](/v0.2/docs/how_to/output_parser_xml/) * [How to: parse YAML output](/v0.2/docs/how_to/output_parser_yaml/) * [How to: retry when output parsing errors occur](/v0.2/docs/how_to/output_parser_retry/) * [How to: try to fix errors in output parsing](/v0.2/docs/how_to/output_parser_fixing/) * [How to: write a custom output parser class](/v0.2/docs/how_to/output_parser_custom/) ### Document loaders[​](#document-loaders "Direct link to Document loaders") [Document Loaders](/v0.2/docs/concepts/#document-loaders) are responsible for loading documents from a variety of sources. * [How to: load CSV data](/v0.2/docs/how_to/document_loader_csv/) * [How to: load data from a directory](/v0.2/docs/how_to/document_loader_directory/) * [How to: load HTML data](/v0.2/docs/how_to/document_loader_html/) * [How to: load JSON data](/v0.2/docs/how_to/document_loader_json/) * [How to: load Markdown data](/v0.2/docs/how_to/document_loader_markdown/) * [How to: load Microsoft Office data](/v0.2/docs/how_to/document_loader_office_file/) * [How to: load PDF files](/v0.2/docs/how_to/document_loader_pdf/) * [How to: write a custom document loader](/v0.2/docs/how_to/document_loader_custom/) ### Text splitters[​](#text-splitters "Direct link to Text splitters") [Text Splitters](/v0.2/docs/concepts/#text-splitters) take a document and split into chunks that can be used for retrieval. * [How to: recursively split text](/v0.2/docs/how_to/recursive_text_splitter/) * [How to: split by HTML headers](/v0.2/docs/how_to/HTML_header_metadata_splitter/) * [How to: split by HTML sections](/v0.2/docs/how_to/HTML_section_aware_splitter/) * [How to: split by character](/v0.2/docs/how_to/character_text_splitter/) * [How to: split code](/v0.2/docs/how_to/code_splitter/) * [How to: split Markdown by headers](/v0.2/docs/how_to/markdown_header_metadata_splitter/) * [How to: recursively split JSON](/v0.2/docs/how_to/recursive_json_splitter/) * [How to: split text into semantic chunks](/v0.2/docs/how_to/semantic-chunker/) * [How to: split by tokens](/v0.2/docs/how_to/split_by_token/) ### Embedding models[​](#embedding-models "Direct link to Embedding models") [Embedding Models](/v0.2/docs/concepts/#embedding-models) take a piece of text and create a numerical representation of it. * [How to: embed text data](/v0.2/docs/how_to/embed_text/) * [How to: cache embedding results](/v0.2/docs/how_to/caching_embeddings/) ### Vector stores[​](#vector-stores "Direct link to Vector stores") [Vector stores](/v0.2/docs/concepts/#vector-stores) are databases that can efficiently store and retrieve embeddings. * [How to: use a vector store to retrieve data](/v0.2/docs/how_to/vectorstores/) ### Retrievers[​](#retrievers "Direct link to Retrievers") [Retrievers](/v0.2/docs/concepts/#retrievers) are responsible for taking a query and returning relevant documents. 
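For example (an illustrative sketch using Chroma and OpenAI embeddings, which requires `OPENAI_API_KEY`; any vector store integration works the same way), a vector store can be exposed as a retriever:

```python
from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings

vectorstore = Chroma.from_texts(
    ["LangChain helps build LLM applications.", "Chroma is a vector database."],
    embedding=OpenAIEmbeddings(),
)
retriever = vectorstore.as_retriever(search_kwargs={"k": 1})
docs = retriever.invoke("What is LangChain?")  # returns a list of Documents
```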
* [How to: use a vector store to retrieve data](/v0.2/docs/how_to/vectorstore_retriever/) * [How to: generate multiple queries to retrieve data for](/v0.2/docs/how_to/MultiQueryRetriever/) * [How to: use contextual compression to compress the data retrieved](/v0.2/docs/how_to/contextual_compression/) * [How to: write a custom retriever class](/v0.2/docs/how_to/custom_retriever/) * [How to: add similarity scores to retriever results](/v0.2/docs/how_to/add_scores_retriever/) * [How to: combine the results from multiple retrievers](/v0.2/docs/how_to/ensemble_retriever/) * [How to: reorder retrieved results to mitigate the "lost in the middle" effect](/v0.2/docs/how_to/long_context_reorder/) * [How to: generate multiple embeddings per document](/v0.2/docs/how_to/multi_vector/) * [How to: retrieve the whole document for a chunk](/v0.2/docs/how_to/parent_document_retriever/) * [How to: generate metadata filters](/v0.2/docs/how_to/self_query/) * [How to: create a time-weighted retriever](/v0.2/docs/how_to/time_weighted_vectorstore/) * [How to: use hybrid vector and keyword retrieval](/v0.2/docs/how_to/hybrid/) ### Indexing[​](#indexing "Direct link to Indexing") Indexing is the process of keeping your vectorstore in-sync with the underlying data source. * [How to: reindex data to keep your vectorstore in-sync with the underlying data source](/v0.2/docs/how_to/indexing/) ### Tools[​](#tools "Direct link to Tools") LangChain [Tools](/v0.2/docs/concepts/#tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call. * [How to: create custom tools](/v0.2/docs/how_to/custom_tools/) * [How to: use built-in tools and built-in toolkits](/v0.2/docs/how_to/tools_builtin/) * [How to: use chat model to call tools](/v0.2/docs/how_to/tool_calling/) * [How to: pass tool results back to model](/v0.2/docs/how_to/tool_results_pass_to_model/) * [How to: add ad-hoc tool calling capability to LLMs and chat models](/v0.2/docs/how_to/tools_prompting/) * [How to: pass run time values to tools](/v0.2/docs/how_to/tool_runtime/) * [How to: add a human in the loop to tool usage](/v0.2/docs/how_to/tools_human/) * [How to: handle errors when calling tools](/v0.2/docs/how_to/tools_error/) * [How to: disable parallel tool calling](/v0.2/docs/how_to/tool_choice/) ### Multimodal[​](#multimodal "Direct link to Multimodal") * [How to: pass multimodal data directly to models](/v0.2/docs/how_to/multimodal_inputs/) * [How to: use multimodal prompts](/v0.2/docs/how_to/multimodal_prompts/) ### Agents[​](#agents "Direct link to Agents") note For in depth how-to guides for agents, please check out [LangGraph](https://github.com/langchain-ai/langgraph) documentation. * [How to: use legacy LangChain Agents (AgentExecutor)](/v0.2/docs/how_to/agent_executor/) * [How to: migrate from legacy LangChain agents to LangGraph](/v0.2/docs/how_to/migrate_agent/) ### Callbacks[​](#callbacks "Direct link to Callbacks") [Callbacks](/v0.2/docs/concepts/#callbacks) allow you to hook into the various stages of your LLM application's execution. 
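As a small illustration (the handler name and print statements are arbitrary), a custom handler subclasses `BaseCallbackHandler` and overrides only the events it cares about:

```python
from langchain_core.callbacks import BaseCallbackHandler


class LoggingHandler(BaseCallbackHandler):
    """Print a line when a chat model starts and when an LLM call finishes."""

    def on_chat_model_start(self, serialized, messages, **kwargs):
        print(f"Chat model starting with {len(messages[0])} message(s)")

    def on_llm_end(self, response, **kwargs):
        print("LLM call finished")


# Attach at runtime, e.g.: chain.invoke(inputs, config={"callbacks": [LoggingHandler()]})
```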
* [How to: pass in callbacks at runtime](/v0.2/docs/how_to/callbacks_runtime/) * [How to: attach callbacks to a module](/v0.2/docs/how_to/callbacks_attach/) * [How to: pass callbacks into a module constructor](/v0.2/docs/how_to/callbacks_constructor/) * [How to: create custom callback handlers](/v0.2/docs/how_to/custom_callbacks/) * [How to: use callbacks in async environments](/v0.2/docs/how_to/callbacks_async/) ### Custom[​](#custom "Direct link to Custom") All of LangChain components can easily be extended to support your own versions. * [How to: create a custom chat model class](/v0.2/docs/how_to/custom_chat_model/) * [How to: create a custom LLM class](/v0.2/docs/how_to/custom_llm/) * [How to: write a custom retriever class](/v0.2/docs/how_to/custom_retriever/) * [How to: write a custom document loader](/v0.2/docs/how_to/document_loader_custom/) * [How to: write a custom output parser class](/v0.2/docs/how_to/output_parser_custom/) * [How to: create custom callback handlers](/v0.2/docs/how_to/custom_callbacks/) * [How to: define a custom tool](/v0.2/docs/how_to/custom_tools/) ### Serialization[​](#serialization "Direct link to Serialization") * [How to: save and load LangChain objects](/v0.2/docs/how_to/serialization/) Use cases[​](#use-cases "Direct link to Use cases") --------------------------------------------------- These guides cover use-case specific details. ### Q&A with RAG[​](#qa-with-rag "Direct link to Q&A with RAG") Retrieval Augmented Generation (RAG) is a way to connect LLMs to external sources of data. For a high-level tutorial on RAG, check out [this guide](/v0.2/docs/tutorials/rag/). * [How to: add chat history](/v0.2/docs/how_to/qa_chat_history_how_to/) * [How to: stream](/v0.2/docs/how_to/qa_streaming/) * [How to: return sources](/v0.2/docs/how_to/qa_sources/) * [How to: return citations](/v0.2/docs/how_to/qa_citations/) * [How to: do per-user retrieval](/v0.2/docs/how_to/qa_per_user/) ### Extraction[​](#extraction "Direct link to Extraction") Extraction is when you use LLMs to extract structured information from unstructured text. For a high level tutorial on extraction, check out [this guide](/v0.2/docs/tutorials/extraction/). * [How to: use reference examples](/v0.2/docs/how_to/extraction_examples/) * [How to: handle long text](/v0.2/docs/how_to/extraction_long_text/) * [How to: do extraction without using function calling](/v0.2/docs/how_to/extraction_parse/) ### Chatbots[​](#chatbots "Direct link to Chatbots") Chatbots involve using an LLM to have a conversation. For a high-level tutorial on building chatbots, check out [this guide](/v0.2/docs/tutorials/chatbot/). * [How to: manage memory](/v0.2/docs/how_to/chatbots_memory/) * [How to: do retrieval](/v0.2/docs/how_to/chatbots_retrieval/) * [How to: use tools](/v0.2/docs/how_to/chatbots_tools/) * [How to: manage large chat history](/v0.2/docs/how_to/trim_messages/) ### Query analysis[​](#query-analysis "Direct link to Query analysis") Query Analysis is the task of using an LLM to generate a query to send to a retriever. For a high-level tutorial on query analysis, check out [this guide](/v0.2/docs/tutorials/query_analysis/). 
* [How to: add examples to the prompt](/v0.2/docs/how_to/query_few_shot/) * [How to: handle cases where no queries are generated](/v0.2/docs/how_to/query_no_queries/) * [How to: handle multiple queries](/v0.2/docs/how_to/query_multiple_queries/) * [How to: handle multiple retrievers](/v0.2/docs/how_to/query_multiple_retrievers/) * [How to: construct filters](/v0.2/docs/how_to/query_constructing_filters/) * [How to: deal with high cardinality categorical variables](/v0.2/docs/how_to/query_high_cardinality/) ### Q&A over SQL + CSV[​](#qa-over-sql--csv "Direct link to Q&A over SQL + CSV") You can use LLMs to do question answering over tabular data. For a high-level tutorial, check out [this guide](/v0.2/docs/tutorials/sql_qa/). * [How to: use prompting to improve results](/v0.2/docs/how_to/sql_prompting/) * [How to: do query validation](/v0.2/docs/how_to/sql_query_checking/) * [How to: deal with large databases](/v0.2/docs/how_to/sql_large_db/) * [How to: deal with CSV files](/v0.2/docs/how_to/sql_csv/) ### Q&A over graph databases[​](#qa-over-graph-databases "Direct link to Q&A over graph databases") You can use an LLM to do question answering over graph databases. For a high-level tutorial, check out [this guide](/v0.2/docs/tutorials/graph/). * [How to: map values to a database](/v0.2/docs/how_to/graph_mapping/) * [How to: add a semantic layer over the database](/v0.2/docs/how_to/graph_semantic/) * [How to: improve results with prompting](/v0.2/docs/how_to/graph_prompting/) * [How to: construct knowledge graphs](/v0.2/docs/how_to/graph_constructing/) [LangGraph](https://langchain-ai.github.io/langgraph)[​](#langgraph "Direct link to langgraph") ----------------------------------------------------------------------------------------------- LangGraph is an extension of LangChain aimed at building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. LangGraph documentation is currently hosted on a separate site. You can peruse [LangGraph how-to guides here](https://langchain-ai.github.io/langgraph/how-tos/). [LangSmith](https://docs.smith.langchain.com/)[​](#langsmith "Direct link to langsmith") ---------------------------------------------------------------------------------------- LangSmith allows you to closely trace, monitor and evaluate your LLM application. It seamlessly integrates with LangChain and LangGraph, and you can use it to inspect and debug individual steps of your chains and agents as you build. LangSmith documentation is hosted on a separate site. You can peruse [LangSmith how-to guides here](https://docs.smith.langchain.com/how_to_guides/). ### Evaluation[​](#evaluation "Direct link to Evaluation") Evaluating performance is a vital part of building LLM-powered applications. LangSmith helps with every step of the process from creating a dataset to defining metrics to running evaluators. To learn more, check out the [LangSmith evaluation how-to guides](https://docs.smith.langchain.com/how_to_guides#evaluation). [Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/how_to/index.mdx) * * * #### Was this page helpful? #### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E). 
https://python.langchain.com/v0.2/docs/tutorials/extraction/
* [](/v0.2/) * [Tutorials](/v0.2/docs/tutorials/) * Build an Extraction Chain On this page Build an Extraction Chain ========================= Prerequisites This guide assumes familiarity with the following concepts: * [Chat Models](/v0.2/docs/concepts/#chat-models) * [Tools](/v0.2/docs/concepts/#tools) * [Tool calling](/v0.2/docs/concepts/#function-tool-calling) In this tutorial, we will build a chain to extract structured information from unstructured text. info This tutorial will only work with models that support **tool calling** Setup[​](#setup "Direct link to Setup") --------------------------------------- ### Jupyter Notebook[​](#jupyter-notebook "Direct link to Jupyter Notebook") This guide (and most of the other guides in the documentation) uses [Jupyter notebooks](https://jupyter.org/) and assumes the reader is as well. Jupyter notebooks are perfect for learning how to work with LLM systems because oftentimes things can go wrong (unexpected output, API down, etc) and going through guides in an interactive environment is a great way to better understand them. This and other tutorials are perhaps most conveniently run in a Jupyter notebook. See [here](https://jupyter.org/install) for instructions on how to install. ### Installation[​](#installation "Direct link to Installation") To install LangChain run: * Pip * Conda pip install langchain conda install langchain -c conda-forge For more details, see our [Installation guide](/v0.2/docs/how_to/installation/). ### LangSmith[​](#langsmith "Direct link to LangSmith") Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with [LangSmith](https://smith.langchain.com). After you sign up at the link above, make sure to set your environment variables to start logging traces: export LANGCHAIN_TRACING_V2="true"export LANGCHAIN_API_KEY="..." Or, if in a notebook, you can set them with: import getpassimport osos.environ["LANGCHAIN_TRACING_V2"] = "true"os.environ["LANGCHAIN_API_KEY"] = getpass.getpass() The Schema[​](#the-schema "Direct link to The Schema") ------------------------------------------------------ First, we need to describe what information we want to extract from the text. We'll use Pydantic to define an example schema to extract personal information. from typing import Optionalfrom langchain_core.pydantic_v1 import BaseModel, Fieldclass Person(BaseModel): """Information about a person.""" # ^ Doc-string for the entity Person. # This doc-string is sent to the LLM as the description of the schema Person, # and it can help to improve extraction results. # Note that: # 1. Each field is an `optional` -- this allows the model to decline to extract it! # 2. Each field has a `description` -- this description is used by the LLM. # Having a good description can help improve extraction results. name: Optional[str] = Field(default=None, description="The name of the person") hair_color: Optional[str] = Field( default=None, description="The color of the person's hair if known" ) height_in_meters: Optional[str] = Field( default=None, description="Height measured in meters" ) There are two best practices when defining schema: 1. Document the **attributes** and the **schema** itself: This information is sent to the LLM and is used to improve the quality of information extraction. 2. 
Do not force the LLM to make up information! Above we used `Optional` for the attributes allowing the LLM to output `None` if it doesn't know the answer. info For best performance, document the schema well and make sure the model isn't force to return results if there's no information to be extracted in the text. The Extractor[​](#the-extractor "Direct link to The Extractor") --------------------------------------------------------------- Let's create an information extractor using the schema we defined above. from typing import Optionalfrom langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholderfrom langchain_core.pydantic_v1 import BaseModel, Field# Define a custom prompt to provide instructions and any additional context.# 1) You can add examples into the prompt template to improve extraction quality# 2) Introduce additional parameters to take context into account (e.g., include metadata# about the document from which the text was extracted.)prompt = ChatPromptTemplate.from_messages( [ ( "system", "You are an expert extraction algorithm. " "Only extract relevant information from the text. " "If you do not know the value of an attribute asked to extract, " "return null for the attribute's value.", ), # Please see the how-to about improving performance with # reference examples. # MessagesPlaceholder('examples'), ("human", "{text}"), ]) **API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) | [MessagesPlaceholder](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.MessagesPlaceholder.html) We need to use a model that supports function/tool calling. Please review [the documentation](/v0.2/docs/concepts/#function-tool-calling) for list of some models that can be used with this API. from langchain_mistralai import ChatMistralAIllm = ChatMistralAI(model="mistral-large-latest", temperature=0)runnable = prompt | llm.with_structured_output(schema=Person) **API Reference:**[ChatMistralAI](https://api.python.langchain.com/en/latest/chat_models/langchain_mistralai.chat_models.ChatMistralAI.html) /Users/harrisonchase/workplace/langchain/libs/core/langchain_core/_api/beta_decorator.py:87: LangChainBetaWarning: The method `ChatMistralAI.with_structured_output` is in beta. It is actively being worked on, so the API may change. warn_beta( Let's test it out text = "Alan Smith is 6 feet tall and has blond hair."runnable.invoke({"text": text}) Person(name='Alan Smith', hair_color='blond', height_in_meters='1.83') info Extraction is Generative 🤯 LLMs are generative models, so they can do some pretty cool things like correctly extract the height of the person in meters even though it was provided in feet! We can see the LangSmith trace here: [https://smith.langchain.com/public/44b69a63-3b3b-47b8-8a6d-61b46533f015/r](https://smith.langchain.com/public/44b69a63-3b3b-47b8-8a6d-61b46533f015/r) Multiple Entities[​](#multiple-entities "Direct link to Multiple Entities") --------------------------------------------------------------------------- In **most cases**, you should be extracting a list of entities rather than a single entity. This can be easily achieved using pydantic by nesting models inside one another. from typing import List, Optionalfrom langchain_core.pydantic_v1 import BaseModel, Fieldclass Person(BaseModel): """Information about a person.""" # ^ Doc-string for the entity Person. 
# This doc-string is sent to the LLM as the description of the schema Person, # and it can help to improve extraction results. # Note that: # 1. Each field is an `optional` -- this allows the model to decline to extract it! # 2. Each field has a `description` -- this description is used by the LLM. # Having a good description can help improve extraction results. name: Optional[str] = Field(default=None, description="The name of the person") hair_color: Optional[str] = Field( default=None, description="The color of the person's hair if known" ) height_in_meters: Optional[str] = Field( default=None, description="Height measured in meters" )class Data(BaseModel): """Extracted data about people.""" # Creates a model so that we can extract multiple entities. people: List[Person]

info Extraction might not be perfect here. Please continue to see how to use **Reference Examples** to improve the quality of extraction, and see the **guidelines** section!

```python
runnable = prompt | llm.with_structured_output(schema=Data)

text = "My name is Jeff, my hair is black and i am 6 feet tall. Anna has the same color hair as me."
runnable.invoke({"text": text})
```

```
Data(people=[Person(name='Jeff', hair_color=None, height_in_meters=None), Person(name='Anna', hair_color=None, height_in_meters=None)])
```

tip When the schema accommodates the extraction of **multiple entities**, it also allows the model to extract **no entities** if no relevant information is in the text by providing an empty list. This is usually a **good** thing! It allows specifying **required** attributes on an entity without necessarily forcing the model to detect this entity.

We can see the LangSmith trace here: [https://smith.langchain.com/public/7173764d-5e76-45fe-8496-84460bd9cdef/r](https://smith.langchain.com/public/7173764d-5e76-45fe-8496-84460bd9cdef/r)

Next steps[​](#next-steps "Direct link to Next steps")
------------------------------------------------------

Now that you understand the basics of extraction with LangChain, you're ready to proceed to the rest of the how-to guides:

* [Add Examples](/v0.2/docs/how_to/extraction_examples/): Learn how to use **reference examples** to improve performance.
* [Handle Long Text](/v0.2/docs/how_to/extraction_long_text/): What should you do if the text does not fit into the context window of the LLM? (A rough sketch of one approach follows below.)
* [Use a Parsing Approach](/v0.2/docs/how_to/extraction_parse/): Use a prompt-based approach to extract with models that do not support **tool/function calling**.
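As a taste of the long-text guide linked above, one simple approach (an illustrative sketch, not the guide's exact code; `long_text` is a placeholder) is to split the input, run the extractor over each chunk in parallel with `.batch`, and naively merge the results:

```python
from langchain_text_splitters import RecursiveCharacterTextSplitter

# Reuses `runnable` (prompt | llm.with_structured_output(schema=Data)) from above.
splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=200)

long_text = "..."  # placeholder for a document too large for the model's context window
chunks = splitter.split_text(long_text)

extractions = runnable.batch(
    [{"text": chunk} for chunk in chunks],
    {"max_concurrency": 5},  # limit parallel requests to the model
)

# Naive merge: concatenate the people found in every chunk (deduplication left as an exercise).
people = [person for data in extractions for person in data.people]
```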
https://python.langchain.com/v0.2/docs/how_to/chatbots_memory/
* [](/v0.2/) * [How-to guides](/v0.2/docs/how_to/) * How to add memory to chatbots On this page How to add memory to chatbots ============================= A key feature of chatbots is their ability to use content of previous conversation turns as context. This state management can take several forms, including: * Simply stuffing previous messages into a chat model prompt. * The above, but trimming old messages to reduce the amount of distracting information the model has to deal with. * More complex modifications like synthesizing summaries for long running conversations. We'll go into more detail on a few techniques below! Setup[​](#setup "Direct link to Setup") --------------------------------------- You'll need to install a few packages, and have your OpenAI API key set as an environment variable named `OPENAI_API_KEY`: %pip install --upgrade --quiet langchain langchain-openai# Set env var OPENAI_API_KEY or load from a .env file:import dotenvdotenv.load_dotenv() WARNING: You are using pip version 22.0.4; however, version 23.3.2 is available.You should consider upgrading via the '/Users/jacoblee/.pyenv/versions/3.10.5/bin/python -m pip install --upgrade pip' command.Note: you may need to restart the kernel to use updated packages. True Let's also set up a chat model that we'll use for the below examples. from langchain_openai import ChatOpenAIchat = ChatOpenAI(model="gpt-3.5-turbo-0125") **API Reference:**[ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) Message passing[​](#message-passing "Direct link to Message passing") --------------------------------------------------------------------- The simplest form of memory is simply passing chat history messages into a chain. Here's an example: from langchain_core.prompts import ChatPromptTemplateprompt = ChatPromptTemplate.from_messages( [ ( "system", "You are a helpful assistant. Answer all questions to the best of your ability.", ), ("placeholder", "{messages}"), ])chain = prompt | chatai_msg = chain.invoke( { "messages": [ ( "human", "Translate this sentence from English to French: I love programming.", ), ("ai", "J'adore la programmation."), ("human", "What did you just say?"), ], })print(ai_msg.content) **API Reference:**[ChatPromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) I said "J'adore la programmation," which means "I love programming" in French. We can see that by passing the previous conversation into a chain, it can use it as context to answer questions. This is the basic concept underpinning chatbot memory - the rest of the guide will demonstrate convenient techniques for passing or reformatting messages. Chat history[​](#chat-history "Direct link to Chat history") ------------------------------------------------------------ It's perfectly fine to store and pass messages directly as an array, but we can use LangChain's built-in [message history class](https://api.python.langchain.com/en/latest/langchain_api_reference.html#module-langchain.memory) to store and load messages as well. Instances of this class are responsible for storing and loading chat messages from persistent storage. LangChain integrates with many providers - you can see a [list of integrations here](/v0.2/docs/integrations/memory/) - but for this demo we will use an ephemeral demo class. 
Here's an example of the API: from langchain_community.chat_message_histories import ChatMessageHistorydemo_ephemeral_chat_history = ChatMessageHistory()demo_ephemeral_chat_history.add_user_message( "Translate this sentence from English to French: I love programming.")demo_ephemeral_chat_history.add_ai_message("J'adore la programmation.")demo_ephemeral_chat_history.messages **API Reference:**[ChatMessageHistory](https://api.python.langchain.com/en/latest/chat_history/langchain_core.chat_history.ChatMessageHistory.html) [HumanMessage(content='Translate this sentence from English to French: I love programming.'), AIMessage(content="J'adore la programmation.")] We can use it directly to store conversation turns for our chain: demo_ephemeral_chat_history = ChatMessageHistory()input1 = "Translate this sentence from English to French: I love programming."demo_ephemeral_chat_history.add_user_message(input1)response = chain.invoke( { "messages": demo_ephemeral_chat_history.messages, })demo_ephemeral_chat_history.add_ai_message(response)input2 = "What did I just ask you?"demo_ephemeral_chat_history.add_user_message(input2)chain.invoke( { "messages": demo_ephemeral_chat_history.messages, }) AIMessage(content='You just asked me to translate the sentence "I love programming" from English to French.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 61, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5cbb21c2-9c30-4031-8ea8-bfc497989535-0', usage_metadata={'input_tokens': 61, 'output_tokens': 18, 'total_tokens': 79}) Automatic history management[​](#automatic-history-management "Direct link to Automatic history management") ------------------------------------------------------------------------------------------------------------ The previous examples pass messages to the chain explicitly. This is a completely acceptable approach, but it does require external management of new messages. LangChain also includes an wrapper for LCEL chains that can handle this process automatically called `RunnableWithMessageHistory`. To show how it works, let's slightly modify the above prompt to take a final `input` variable that populates a `HumanMessage` template after the chat history. This means that we will expect a `chat_history` parameter that contains all messages BEFORE the current messages instead of all messages: prompt = ChatPromptTemplate.from_messages( [ ( "system", "You are a helpful assistant. Answer all questions to the best of your ability.", ), ("placeholder", "{chat_history}"), ("human", "{input}"), ])chain = prompt | chat We'll pass the latest input to the conversation here and let the `RunnableWithMessageHistory` class wrap our chain and do the work of appending that `input` variable to the chat history. Next, let's declare our wrapped chain: from langchain_core.runnables.history import RunnableWithMessageHistorydemo_ephemeral_chat_history_for_chain = ChatMessageHistory()chain_with_message_history = RunnableWithMessageHistory( chain, lambda session_id: demo_ephemeral_chat_history_for_chain, input_messages_key="input", history_messages_key="chat_history",) **API Reference:**[RunnableWithMessageHistory](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) This class takes a few parameters in addition to the chain that we want to wrap: * A factory function that returns a message history for a given session id. 
This allows your chain to handle multiple users at once by loading different messages for different conversations. * An `input_messages_key` that specifies which part of the input should be tracked and stored in the chat history. In this example, we want to track the string passed in as `input`. * A `history_messages_key` that specifies what the previous messages should be injected into the prompt as. Our prompt has a `MessagesPlaceholder` named `chat_history`, so we specify this property to match. * (For chains with multiple outputs) an `output_messages_key` which specifies which output to store as history. This is the inverse of `input_messages_key`. We can invoke this new chain as normal, with an additional `configurable` field that specifies the particular `session_id` to pass to the factory function. This is unused for the demo, but in real-world chains, you'll want to return a chat history corresponding to the passed session: chain_with_message_history.invoke( {"input": "Translate this sentence from English to French: I love programming."}, {"configurable": {"session_id": "unused"}},) Parent run dc4e2f79-4bcd-4a36-9506-55ace9040588 not found for run 34b5773e-3ced-46a6-8daf-4d464c15c940. Treating as a root run. AIMessage(content='"J\'adore la programmation."', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 39, 'total_tokens': 48}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-648b0822-b0bb-47a2-8e7d-7d34744be8f2-0', usage_metadata={'input_tokens': 39, 'output_tokens': 9, 'total_tokens': 48}) chain_with_message_history.invoke( {"input": "What did I just ask you?"}, {"configurable": {"session_id": "unused"}}) Parent run cc14b9d8-c59e-40db-a523-d6ab3fc2fa4f not found for run 5b75e25c-131e-46ee-9982-68569db04330. Treating as a root run. AIMessage(content='You asked me to translate the sentence "I love programming" from English to French.', response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 63, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5950435c-1dc2-43a6-836f-f989fd62c95e-0', usage_metadata={'input_tokens': 63, 'output_tokens': 17, 'total_tokens': 80}) Modifying chat history[​](#modifying-chat-history "Direct link to Modifying chat history") ------------------------------------------------------------------------------------------ Modifying stored chat messages can help your chatbot handle a variety of situations. Here are some examples: ### Trimming messages[​](#trimming-messages "Direct link to Trimming messages") LLMs and chat models have limited context windows, and even if you're not directly hitting limits, you may want to limit the amount of distraction the model has to deal with. One solution is trim the historic messages before passing them to the model. Let's use an example history with some preloaded messages: demo_ephemeral_chat_history = ChatMessageHistory()demo_ephemeral_chat_history.add_user_message("Hey there! I'm Nemo.")demo_ephemeral_chat_history.add_ai_message("Hello!")demo_ephemeral_chat_history.add_user_message("How are you today?")demo_ephemeral_chat_history.add_ai_message("Fine thanks!")demo_ephemeral_chat_history.messages [HumanMessage(content="Hey there! 
I'm Nemo."), AIMessage(content='Hello!'), HumanMessage(content='How are you today?'), AIMessage(content='Fine thanks!')] Let's use this message history with the `RunnableWithMessageHistory` chain we declared above: chain_with_message_history = RunnableWithMessageHistory( chain, lambda session_id: demo_ephemeral_chat_history, input_messages_key="input", history_messages_key="chat_history",)chain_with_message_history.invoke( {"input": "What's my name?"}, {"configurable": {"session_id": "unused"}},) Parent run 7ff2d8ec-65e2-4f67-8961-e498e2c4a591 not found for run 3881e990-6596-4326-84f6-2b76949e0657. Treating as a root run. AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72}) We can see the chain remembers the preloaded name. But let's say we have a very small context window, and we want to trim the number of messages passed to the chain to only the 2 most recent ones. We can use the built in [trim\_messages](/v0.2/docs/how_to/trim_messages/) util to trim messages based on their token count before they reach our prompt. In this case we'll count each message as 1 "token" and keep only the last two messages: from operator import itemgetterfrom langchain_core.messages import trim_messagesfrom langchain_core.runnables import RunnablePassthroughtrimmer = trim_messages(strategy="last", max_tokens=2, token_counter=len)chain_with_trimming = ( RunnablePassthrough.assign(chat_history=itemgetter("chat_history") | trimmer) | prompt | chat)chain_with_trimmed_history = RunnableWithMessageHistory( chain_with_trimming, lambda session_id: demo_ephemeral_chat_history, input_messages_key="input", history_messages_key="chat_history",) **API Reference:**[trim\_messages](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.trim_messages.html) | [RunnablePassthrough](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) Let's call this new chain and check the messages afterwards: chain_with_trimmed_history.invoke( {"input": "Where does P. Sherman live?"}, {"configurable": {"session_id": "unused"}},) Parent run 775cde65-8d22-4c44-80bb-f0b9811c32ca not found for run 5cf71d0e-4663-41cd-8dbe-e9752689cfac. Treating as a root run. AIMessage(content='P. Sherman is a fictional character from the animated movie "Finding Nemo" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80}) demo_ephemeral_chat_history.messages [HumanMessage(content="Hey there! 
I'm Nemo."), AIMessage(content='Hello!'), HumanMessage(content='How are you today?'), AIMessage(content='Fine thanks!'), HumanMessage(content="What's my name?"), AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72}), HumanMessage(content='Where does P. Sherman live?'), AIMessage(content='P. Sherman is a fictional character from the animated movie "Finding Nemo" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})] And we can see that our history has removed the two oldest messages while still adding the most recent conversation at the end. The next time the chain is called, `trim_messages` will be called again, and only the two most recent messages will be passed to the model. In this case, this means that the model will forget the name we gave it the next time we invoke it: chain_with_trimmed_history.invoke( {"input": "What is my name?"}, {"configurable": {"session_id": "unused"}},) Parent run fde7123f-6fd3-421a-a3fc-2fb37dead119 not found for run 061a4563-2394-470d-a3ed-9bf1388ca431. Treating as a root run. AIMessage(content="I'm sorry, but I don't have access to your personal information, so I don't know your name. How else may I assist you today?", response_metadata={'token_usage': {'completion_tokens': 31, 'prompt_tokens': 74, 'total_tokens': 105}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-0ab03495-1f7c-4151-9070-56d2d1c565ff-0', usage_metadata={'input_tokens': 74, 'output_tokens': 31, 'total_tokens': 105}) Check out our [how to guide on trimming messages](/v0.2/docs/how_to/trim_messages/) for more. ### Summary memory[​](#summary-memory "Direct link to Summary memory") We can use this same pattern in other ways too. For example, we could use an additional LLM call to generate a summary of the conversation before calling our chain. Let's recreate our chat history and chatbot chain: demo_ephemeral_chat_history = ChatMessageHistory()demo_ephemeral_chat_history.add_user_message("Hey there! I'm Nemo.")demo_ephemeral_chat_history.add_ai_message("Hello!")demo_ephemeral_chat_history.add_user_message("How are you today?")demo_ephemeral_chat_history.add_ai_message("Fine thanks!")demo_ephemeral_chat_history.messages [HumanMessage(content="Hey there! I'm Nemo."), AIMessage(content='Hello!'), HumanMessage(content='How are you today?'), AIMessage(content='Fine thanks!')] We'll slightly modify the prompt to make the LLM aware that will receive a condensed summary instead of a chat history: prompt = ChatPromptTemplate.from_messages( [ ( "system", "You are a helpful assistant. Answer all questions to the best of your ability. 
Check out our [how to guide on trimming messages](/v0.2/docs/how_to/trim_messages/) for more.

### Summary memory[​](#summary-memory "Direct link to Summary memory")

We can use this same pattern in other ways too. For example, we could use an additional LLM call to generate a summary of the conversation before calling our chain. Let's recreate our chat history and chatbot chain:

```python
demo_ephemeral_chat_history = ChatMessageHistory()

demo_ephemeral_chat_history.add_user_message("Hey there! I'm Nemo.")
demo_ephemeral_chat_history.add_ai_message("Hello!")
demo_ephemeral_chat_history.add_user_message("How are you today?")
demo_ephemeral_chat_history.add_ai_message("Fine thanks!")

demo_ephemeral_chat_history.messages
```

```text
[HumanMessage(content="Hey there! I'm Nemo."),
 AIMessage(content='Hello!'),
 HumanMessage(content='How are you today?'),
 AIMessage(content='Fine thanks!')]
```

We'll slightly modify the prompt to make the LLM aware that it will receive a condensed summary instead of a chat history:

```python
prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You are a helpful assistant. Answer all questions to the best of your ability. The provided chat history includes facts about the user you are speaking with.",
        ),
        ("placeholder", "{chat_history}"),
        ("user", "{input}"),
    ]
)

chain = prompt | chat

chain_with_message_history = RunnableWithMessageHistory(
    chain,
    lambda session_id: demo_ephemeral_chat_history,
    input_messages_key="input",
    history_messages_key="chat_history",
)
```

And now, let's create a function that will distill previous interactions into a summary. We can add this one to the front of the chain too:

```python
def summarize_messages(chain_input):
    stored_messages = demo_ephemeral_chat_history.messages
    if len(stored_messages) == 0:
        return False
    summarization_prompt = ChatPromptTemplate.from_messages(
        [
            ("placeholder", "{chat_history}"),
            (
                "user",
                "Distill the above chat messages into a single summary message. Include as many specific details as you can.",
            ),
        ]
    )
    summarization_chain = summarization_prompt | chat

    summary_message = summarization_chain.invoke({"chat_history": stored_messages})

    demo_ephemeral_chat_history.clear()
    demo_ephemeral_chat_history.add_message(summary_message)

    return True


chain_with_summarization = (
    RunnablePassthrough.assign(messages_summarized=summarize_messages)
    | chain_with_message_history
)
```

Let's see if it remembers the name we gave it:

```python
chain_with_summarization.invoke(
    {"input": "What did I say my name was?"},
    {"configurable": {"session_id": "unused"}},
)
```

```text
AIMessage(content='You introduced yourself as Nemo. How can I assist you today, Nemo?')
```

```python
demo_ephemeral_chat_history.messages
```

```text
[AIMessage(content='The conversation is between Nemo and an AI. Nemo introduces himself and the AI responds with a greeting. Nemo then asks the AI how it is doing, and the AI responds that it is fine.'),
 HumanMessage(content='What did I say my name was?'),
 AIMessage(content='You introduced yourself as Nemo. How can I assist you today, Nemo?')]
```

Note that invoking the chain again will produce another summary, generated from the previous summary plus any new messages, and so on. You could also design a hybrid approach where a certain number of messages are retained in chat history while others are summarized; one way that might look is sketched below.
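As one possible reading of that hybrid approach, here is a hedged sketch based on the `summarize_messages` function above. The `summarize_older_messages` name and the `keep_last` parameter are our own illustrative additions, not part of the guide:

```python
# Hedged sketch (not from the guide) of a hybrid approach: keep the last
# `keep_last` messages verbatim and fold everything older into a summary.
def summarize_older_messages(chain_input, keep_last: int = 2):
    stored_messages = demo_ephemeral_chat_history.messages
    if len(stored_messages) <= keep_last:
        return False

    older, recent = stored_messages[:-keep_last], stored_messages[-keep_last:]

    summarization_prompt = ChatPromptTemplate.from_messages(
        [
            ("placeholder", "{chat_history}"),
            (
                "user",
                "Distill the above chat messages into a single summary message. "
                "Include as many specific details as you can.",
            ),
        ]
    )
    summarization_chain = summarization_prompt | chat

    # Summarize only the older messages.
    summary_message = summarization_chain.invoke({"chat_history": older})

    # Rebuild the history: one summary message followed by the untouched recent turns.
    demo_ephemeral_chat_history.clear()
    demo_ephemeral_chat_history.add_message(summary_message)
    for message in recent:
        demo_ephemeral_chat_history.add_message(message)
    return True
```

Swapping this function into `RunnablePassthrough.assign` in place of `summarize_messages` would keep the most recent turns word-for-word while compressing everything older into a single summary message.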