From 7aec5395e3d7a9cc6496635e77f58e2dc5503f94 Mon Sep 17 00:00:00 2001
From: "Documenter.jl"
Date: Sat, 19 Oct 2024 12:43:16 +0000
Subject: [PATCH] build based on 45f9b43

---
 previews/PR218/404.html | 4 +-
 .../{app.DOuSObzt.js => app.DgFiOK72.js} | 2 +-
 .../chunks/@localSearchIndexroot.CKvFodwz.js | 4 +
 .../chunks/@localSearchIndexroot.Dn3ujldP.js | 4 -
 ...isIjwo.js => VPLocalSearchBox.DERldJmv.js} | 4 +-
 .../{theme.ik5QZKRB.js => theme.r06NSmXs.js} | 4 +-
 ...ra_tools_agent_tools_intro.md.C6nMFv4B.js} | 2 +-
 ...ols_agent_tools_intro.md.C6nMFv4B.lean.js} | 2 +-
 ...xtra_tools_api_tools_intro.md.DRt6snGq.js} | 2 +-
 ...tools_api_tools_intro.md.DRt6snGq.lean.js} | 2 +-
 ...xtra_tools_rag_tools_intro.md.oi28ZdI4.js} | 2 +-
 ...tools_rag_tools_intro.md.oi28ZdI4.lean.js} | 2 +-
 ...tools_text_utilities_intro.md.Cls15k4M.js} | 2 +-
 ..._text_utilities_intro.md.Cls15k4M.lean.js} | 2 +-
 ...d.DMO0NW9D.js => reference.md.Bl4MWKuL.js} | 38 ++--
 ....lean.js => reference.md.Bl4MWKuL.lean.js} | 38 ++--
 ...js => reference_agenttools.md.CE_B_eQV.js} | 2 +-
 ... reference_agenttools.md.CE_B_eQV.lean.js} | 2 +-
 ...1.js => reference_apitools.md.BVdQH4AZ.js} | 2 +-
 ...=> reference_apitools.md.BVdQH4AZ.lean.js} | 2 +-
 ... => reference_experimental.md.DW1f4gT-.js} | 2 +-
 ...eference_experimental.md.DW1f4gT-.lean.js} | 2 +-
 ...n.js => reference_ragtools.md.Bby7eP61.js} | 2 +-
 ...=> reference_ragtools.md.Bby7eP61.lean.js} | 2 +-
 .../PR218/coverage_of_model_providers.html | 6 +-
 previews/PR218/examples/building_RAG.html | 6 +-
 previews/PR218/examples/readme_examples.html | 6 +-
 .../examples/working_with_aitemplates.html | 6 +-
 .../examples/working_with_custom_apis.html | 6 +-
 .../working_with_google_ai_studio.html | 6 +-
 .../PR218/examples/working_with_ollama.html | 6 +-
 .../PR218/extra_tools/agent_tools_intro.html | 18 +-
 .../PR218/extra_tools/api_tools_intro.html | 10 +-
 .../PR218/extra_tools/rag_tools_intro.html | 24 +--
 .../extra_tools/text_utilities_intro.html | 20 +-
 .../PR218/frequently_asked_questions.html | 6 +-
 previews/PR218/getting_started.html | 6 +-
 previews/PR218/hashmap.json | 2 +-
 previews/PR218/how_it_works.html | 6 +-
 previews/PR218/index.html | 6 +-
 previews/PR218/prompts/RAG.html | 6 +-
 previews/PR218/prompts/agents.html | 6 +-
 previews/PR218/prompts/classification.html | 6 +-
 previews/PR218/prompts/critic.html | 6 +-
 previews/PR218/prompts/extraction.html | 6 +-
 previews/PR218/prompts/general.html | 6 +-
 previews/PR218/prompts/persona-task.html | 6 +-
 previews/PR218/prompts/visual.html | 6 +-
 previews/PR218/reference.html | 192 +++++++++---------
 previews/PR218/reference_agenttools.html | 30 +--
 previews/PR218/reference_apitools.html | 12 +-
 previews/PR218/reference_experimental.html | 10 +-
 previews/PR218/reference_ragtools.html | 150 +++++++-------
 53 files changed, 356 insertions(+), 356 deletions(-)
 rename previews/PR218/assets/{app.DOuSObzt.js => app.DgFiOK72.js} (97%)
 create mode 100644 previews/PR218/assets/chunks/@localSearchIndexroot.CKvFodwz.js
 delete mode 100644 previews/PR218/assets/chunks/@localSearchIndexroot.Dn3ujldP.js
 rename previews/PR218/assets/chunks/{VPLocalSearchBox.CYisIjwo.js => VPLocalSearchBox.DERldJmv.js} (99%)
 rename previews/PR218/assets/chunks/{theme.ik5QZKRB.js => theme.r06NSmXs.js} (99%)
 rename previews/PR218/assets/{extra_tools_agent_tools_intro.md.cSVCxyyT.js => extra_tools_agent_tools_intro.md.C6nMFv4B.js} (98%)
 rename previews/PR218/assets/{extra_tools_agent_tools_intro.md.cSVCxyyT.lean.js => extra_tools_agent_tools_intro.md.C6nMFv4B.lean.js} (98%)
 rename previews/PR218/assets/{extra_tools_api_tools_intro.md.Dbv02Yyd.js => extra_tools_api_tools_intro.md.DRt6snGq.js} (99%)
 rename previews/PR218/assets/{extra_tools_api_tools_intro.md.Dbv02Yyd.lean.js => extra_tools_api_tools_intro.md.DRt6snGq.lean.js} (99%)
 rename previews/PR218/assets/{extra_tools_rag_tools_intro.md.CJjL2hwq.lean.js => extra_tools_rag_tools_intro.md.oi28ZdI4.js} (99%)
 rename previews/PR218/assets/{extra_tools_rag_tools_intro.md.CJjL2hwq.js => extra_tools_rag_tools_intro.md.oi28ZdI4.lean.js} (99%)
 rename previews/PR218/assets/{extra_tools_text_utilities_intro.md.B_lqAVJR.js => extra_tools_text_utilities_intro.md.Cls15k4M.js} (98%)
 rename previews/PR218/assets/{extra_tools_text_utilities_intro.md.B_lqAVJR.lean.js => extra_tools_text_utilities_intro.md.Cls15k4M.lean.js} (98%)
 rename previews/PR218/assets/{reference.md.DMO0NW9D.js => reference.md.Bl4MWKuL.js} (96%)
 rename previews/PR218/assets/{reference.md.DMO0NW9D.lean.js => reference.md.Bl4MWKuL.lean.js} (96%)
 rename previews/PR218/assets/{reference_agenttools.md.D536xaTJ.js => reference_agenttools.md.CE_B_eQV.js} (97%)
 rename previews/PR218/assets/{reference_agenttools.md.D536xaTJ.lean.js => reference_agenttools.md.CE_B_eQV.lean.js} (97%)
 rename previews/PR218/assets/{reference_apitools.md.DVDc6Kr1.js => reference_apitools.md.BVdQH4AZ.js} (97%)
 rename previews/PR218/assets/{reference_apitools.md.DVDc6Kr1.lean.js => reference_apitools.md.BVdQH4AZ.lean.js} (97%)
 rename previews/PR218/assets/{reference_experimental.md.BNmmZjbx.js => reference_experimental.md.DW1f4gT-.js} (95%)
 rename previews/PR218/assets/{reference_experimental.md.BNmmZjbx.lean.js => reference_experimental.md.DW1f4gT-.lean.js} (95%)
 rename previews/PR218/assets/{reference_ragtools.md.gYCqDEbn.lean.js => reference_ragtools.md.Bby7eP61.js} (93%)
 rename previews/PR218/assets/{reference_ragtools.md.gYCqDEbn.js => reference_ragtools.md.Bby7eP61.lean.js} (93%)

diff --git a/previews/PR218/404.html b/previews/PR218/404.html
index a1f3d848d..048aca801 100644
--- a/previews/PR218/404.html
+++ b/previews/PR218/404.html
@@ -8,14 +8,14 @@
-
+
- + \ No newline at end of file diff --git a/previews/PR218/assets/app.DOuSObzt.js b/previews/PR218/assets/app.DgFiOK72.js similarity index 97% rename from previews/PR218/assets/app.DOuSObzt.js rename to previews/PR218/assets/app.DgFiOK72.js index 50422fb24..413f2be3e 100644 --- a/previews/PR218/assets/app.DOuSObzt.js +++ b/previews/PR218/assets/app.DgFiOK72.js @@ -1,4 +1,4 @@ -import { R as RawTheme } from "./chunks/theme.ik5QZKRB.js"; +import { R as RawTheme } from "./chunks/theme.r06NSmXs.js"; import { R as inBrowser, a6 as useUpdateHead, a7 as RouterSymbol, a8 as initData, a9 as dataSymbol, aa as Content, ab as ClientOnly, ac as siteDataRef, ad as createSSRApp, ae as createRouter, af as pathToFile, d as defineComponent, u as useData, v as onMounted, s as watchEffect, ag as usePrefetch, ah as useCopyCode, ai as useCodeGroups, a4 as h } from "./chunks/framework.Dg7-7npA.js"; function resolveThemeExtends(theme) { if (theme.extends) { diff --git a/previews/PR218/assets/chunks/@localSearchIndexroot.CKvFodwz.js b/previews/PR218/assets/chunks/@localSearchIndexroot.CKvFodwz.js new file mode 100644 index 000000000..7340f1a54 --- /dev/null +++ b/previews/PR218/assets/chunks/@localSearchIndexroot.CKvFodwz.js @@ -0,0 +1,4 @@ +const _localSearchIndexroot = '{"documentCount":184,"nextId":184,"documentIds":{"0":"/PromptingTools.jl/previews/PR218/coverage_of_model_providers#Coverage-of-Model-Providers","1":"/PromptingTools.jl/previews/PR218/examples/building_RAG#Building-a-Simple-Retrieval-Augmented-Generation-(RAG)-System-with-RAGTools","2":"/PromptingTools.jl/previews/PR218/examples/building_RAG#RAG-in-Two-Lines","3":"/PromptingTools.jl/previews/PR218/examples/building_RAG#Evaluations","4":"/PromptingTools.jl/previews/PR218/examples/building_RAG#Generate-Q-and-A-pairs","5":"/PromptingTools.jl/previews/PR218/examples/building_RAG#Explore-one-Q-and-A-pair","6":"/PromptingTools.jl/previews/PR218/examples/building_RAG#Evaluate-this-Q-and-A-pair","7":"/PromptingTools.jl/previews/PR218/examples/building_RAG#Evaluate-the-Whole-Set","8":"/PromptingTools.jl/previews/PR218/examples/building_RAG#What-would-we-do-next?","9":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Various-Examples","10":"/PromptingTools.jl/previews/PR218/examples/readme_examples#ai*-Functions-Overview","11":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Seamless-Integration-Into-Your-Workflow","12":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Advanced-Prompts-/-Conversations","13":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Templated-Prompts","14":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Asynchronous-Execution","15":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Model-Aliases","16":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Embeddings","17":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Classification","18":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Routing-to-Defined-Categories","19":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Data-Extraction","20":"/PromptingTools.jl/previews/PR218/examples/readme_examples#OCR-and-Image-Comprehension","21":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Experimental-Agent-Workflows-/-Output-Validation-with-airetry!","22":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Using-Ollama-models","23":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Using-MistralAI-API-and-other-OpenAI-compatible-APIs","24":"/PromptingTools.jl/p
reviews/PR218/examples/working_with_aitemplates#Using-AITemplates","25":"/PromptingTools.jl/previews/PR218/examples/working_with_custom_apis#Custom-APIs","26":"/PromptingTools.jl/previews/PR218/examples/working_with_custom_apis#Using-MistralAI","27":"/PromptingTools.jl/previews/PR218/examples/working_with_custom_apis#Using-other-OpenAI-compatible-APIs","28":"/PromptingTools.jl/previews/PR218/examples/working_with_custom_apis#Using-llama.cpp-server","29":"/PromptingTools.jl/previews/PR218/examples/working_with_custom_apis#Using-Databricks-Foundation-Models","30":"/PromptingTools.jl/previews/PR218/examples/working_with_custom_apis#Using-Together.ai","31":"/PromptingTools.jl/previews/PR218/examples/working_with_custom_apis#Using-Fireworks.ai","32":"/PromptingTools.jl/previews/PR218/examples/working_with_google_ai_studio#Working-with-Google-AI-Studio","33":"/PromptingTools.jl/previews/PR218/examples/working_with_google_ai_studio#Text-Generation-with-aigenerate","34":"/PromptingTools.jl/previews/PR218/examples/working_with_google_ai_studio#Simple-message","35":"/PromptingTools.jl/previews/PR218/examples/working_with_google_ai_studio#Advanced-Prompts","36":"/PromptingTools.jl/previews/PR218/examples/working_with_google_ai_studio#Gotchas","37":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Local-models-with-Ollama.ai","38":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Text-Generation-with-aigenerate","39":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Simple-message","40":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Standard-string-interpolation","41":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Advanced-Prompts","42":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Schema-Changes-/-Custom-models","43":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Providing-Images-with-aiscan","44":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Embeddings-with-aiembed","45":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Simple-embedding-for-one-document","46":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Multiple-documents-embedding","47":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Using-postprocessing-function","48":"/PromptingTools.jl/previews/PR218/extra_tools/agent_tools_intro#Agent-Tools-Introduction","49":"/PromptingTools.jl/previews/PR218/extra_tools/agent_tools_intro#Highlights","50":"/PromptingTools.jl/previews/PR218/extra_tools/agent_tools_intro#Examples","51":"/PromptingTools.jl/previews/PR218/extra_tools/agent_tools_intro#Automatic-Fixing-of-AI-Calls","52":"/PromptingTools.jl/previews/PR218/extra_tools/agent_tools_intro#References","53":"/PromptingTools.jl/previews/PR218/extra_tools/api_tools_intro#APITools-Introduction","54":"/PromptingTools.jl/previews/PR218/extra_tools/api_tools_intro#Highlights","55":"/PromptingTools.jl/previews/PR218/extra_tools/api_tools_intro#References","56":"/PromptingTools.jl/previews/PR218/extra_tools/text_utilities_intro#Text-Utilities","57":"/PromptingTools.jl/previews/PR218/extra_tools/text_utilities_intro#Highlights","58":"/PromptingTools.jl/previews/PR218/extra_tools/text_utilities_intro#References","59":"/PromptingTools.jl/previews/PR218/extra_tools/rag_tools_intro#RAG-Tools-Introduction","60":"/PromptingTools.jl/previews/PR218/extra_tools/rag_tools_intro#Highlights","61":"/PromptingTools.jl/previews/PR218/extra_tools/rag_tools_intro#Examples","62":"/PromptingTools.jl/previews/
PR218/extra_tools/rag_tools_intro#RAG-Interface","63":"/PromptingTools.jl/previews/PR218/extra_tools/rag_tools_intro#System-Overview","64":"/PromptingTools.jl/previews/PR218/extra_tools/rag_tools_intro#RAG-Diagram","65":"/PromptingTools.jl/previews/PR218/extra_tools/rag_tools_intro#Passing-Keyword-Arguments","66":"/PromptingTools.jl/previews/PR218/extra_tools/rag_tools_intro#Deepdive","67":"/PromptingTools.jl/previews/PR218/extra_tools/rag_tools_intro#References","68":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Frequently-Asked-Questions","69":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Why-OpenAI","70":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#What-if-I-cannot-access-OpenAI?","71":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Data-Privacy-and-OpenAI","72":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Creating-OpenAI-API-Key","73":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#getting-an-error-argumenterror-api-key-cannot-be-empty-despite-having-set-openai-api-key-getting-an-error-argumenterror-apikey-cannot-be-empty-despite-having-set-openaiapi-key","74":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Getting-an-error-"Rate-limit-exceeded"-from-OpenAI?","75":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Getting-the-error-"429-Too-Many-Requests"?","76":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Setting-OpenAI-Spending-Limits","77":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#How-much-does-it-cost?-Is-it-worth-paying-for?","78":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#How-to-try-new-OpenAI-models-if-I\'m-not-Tier-5-customer?","79":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Configuring-the-Environment-Variable-for-API-Key","80":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Setting-the-API-Key-via-Preferences.jl","81":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Understanding-the-API-Keyword-Arguments-in-aigenerate-(api_kwargs)","82":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Instant-Access-from-Anywhere","83":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Open-Source-Alternatives","84":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Setup-Guide-for-Ollama","85":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Changing-the-Default-Model-or-Schema","86":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Using-Custom-API-Providers-like-Azure-or-Databricks","87":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#How-to-have-Multi-turn-Conversations?","88":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#How-to-have-typed-responses?","89":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#How-to-quickly-create-a-prompt-template?","90":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Do-we-have-a-RecursiveCharacterTextSplitter-like-Langchain?","91":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#How-would-I-fine-tune-a-model?","92":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Can-I-see-how-my-prompt-is-rendered-/-what-is-sent-to-the-API?","93":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Automatic-Logging-/-Tracing","94":"/PromptingTools.jl/previews/PR218/getting_started#Getting-Started","95":"/PromptingTools.jl/previews/PR218/getting_started#Prerequisites","96":"/PromptingTools.jl/previews/PR218
/getting_started#Installation","97":"/PromptingTools.jl/previews/PR218/getting_started#Quick-Start-with-@ai_str","98":"/PromptingTools.jl/previews/PR218/getting_started#Using-aigenerate-with-placeholders","99":"/PromptingTools.jl/previews/PR218/how_it_works#How-It-Works","100":"/PromptingTools.jl/previews/PR218/how_it_works#Key-Concepts","101":"/PromptingTools.jl/previews/PR218/how_it_works#API/Model-Providers","102":"/PromptingTools.jl/previews/PR218/how_it_works#Schemas","103":"/PromptingTools.jl/previews/PR218/how_it_works#Prompts","104":"/PromptingTools.jl/previews/PR218/how_it_works#Messages","105":"/PromptingTools.jl/previews/PR218/how_it_works#Prompt-Templates","106":"/PromptingTools.jl/previews/PR218/how_it_works#ai*-Functions-Overview","107":"/PromptingTools.jl/previews/PR218/how_it_works#Walkthrough-Example-for-aigenerate","108":"/PromptingTools.jl/previews/PR218/how_it_works#Walkthrough-Example-for-aiextract","109":"/PromptingTools.jl/previews/PR218/prompts/RAG#Basic-Rag-Templates","110":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGAnswerFromContext","111":"/PromptingTools.jl/previews/PR218/prompts/RAG#Ranking-Templates","112":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGRankGPT","113":"/PromptingTools.jl/previews/PR218/prompts/RAG#Metadata-Templates","114":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGExtractMetadataLong","115":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGExtractMetadataShort","116":"/PromptingTools.jl/previews/PR218/prompts/RAG#Refinement-Templates","117":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGAnswerRefiner","118":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGWebSearchRefiner","119":"/PromptingTools.jl/previews/PR218/prompts/RAG#Evaluation-Templates","120":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGCreateQAFromContext","121":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGJudgeAnswerFromContext","122":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGJudgeAnswerFromContextShort","123":"/PromptingTools.jl/previews/PR218/prompts/RAG#Query-Transformations-Templates","124":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGJuliaQueryHyDE","125":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGQueryHyDE","126":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGQueryKeywordExpander","127":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGQueryOptimizer","128":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGQuerySimplifier","129":"/PromptingTools.jl/previews/PR218/prompts/agents#Code-Fixing-Templates","130":"/PromptingTools.jl/previews/PR218/prompts/agents#Template:-CodeFixerRCI","131":"/PromptingTools.jl/previews/PR218/prompts/agents#Template:-CodeFixerShort","132":"/PromptingTools.jl/previews/PR218/prompts/agents#Template:-CodeFixerTiny","133":"/PromptingTools.jl/previews/PR218/prompts/agents#Feedback-Templates","134":"/PromptingTools.jl/previews/PR218/prompts/agents#Template:-FeedbackFromEvaluator","135":"/PromptingTools.jl/previews/PR218/prompts/classification#Classification-Templates","136":"/PromptingTools.jl/previews/PR218/prompts/classification#Template:-InputClassifier","137":"/PromptingTools.jl/previews/PR218/prompts/classification#Template:-JudgeIsItTrue","138":"/PromptingTools.jl/previews/PR218/prompts/classification#Template:-QuestionRouter","139":"/PromptingTools.jl/previews/PR218/prompts/critic#Critic-Templates","140":"/PromptingTools.jl/previews/PR218/prompts/critic#Templ
ate:-ChiefEditorTranscriptCritic","141":"/PromptingTools.jl/previews/PR218/prompts/critic#Template:-GenericTranscriptCritic","142":"/PromptingTools.jl/previews/PR218/prompts/critic#Template:-JuliaExpertTranscriptCritic","143":"/PromptingTools.jl/previews/PR218/prompts/extraction#Xml-Formatted-Templates","144":"/PromptingTools.jl/previews/PR218/prompts/extraction#Template:-ExtractDataCoTXML","145":"/PromptingTools.jl/previews/PR218/prompts/extraction#Template:-ExtractDataXML","146":"/PromptingTools.jl/previews/PR218/prompts/extraction#Extraction-Templates","147":"/PromptingTools.jl/previews/PR218/prompts/extraction#Template:-ExtractData","148":"/PromptingTools.jl/previews/PR218/prompts/general#General-Templates","149":"/PromptingTools.jl/previews/PR218/prompts/general#Template:-BlankSystemUser","150":"/PromptingTools.jl/previews/PR218/prompts/general#Template:-PromptEngineerForTask","151":"/PromptingTools.jl/previews/PR218/prompts/visual#Visual-Templates","152":"/PromptingTools.jl/previews/PR218/prompts/visual#Template:-BlogTitleImageGenerator","153":"/PromptingTools.jl/previews/PR218/prompts/visual#Template:-OCRTask","154":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Persona-Task-Templates","155":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-AnalystChaptersInTranscript","156":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-AnalystDecisionsInTranscript","157":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-AnalystThemesInResponses","158":"/PromptingTools.jl/previews/PR218/prompts/persona-task#theme-1-theme-description","159":"/PromptingTools.jl/previews/PR218/prompts/persona-task#theme-2-theme-description","160":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-AssistantAsk","161":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-ConversationLabeler","162":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-DetailOrientedTask","163":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-DrafterEmailBrief","164":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-GenericTopicExpertAsk","165":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-GenericWriter","166":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JavaScriptExpertAsk","167":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JuliaBlogWriter","168":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JuliaExpertAsk","169":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JuliaExpertCoTTask","170":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JuliaExpertTestCode","171":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JuliaRecapCoTTask","172":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JuliaRecapTask","173":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-LinuxBashExpertAsk","174":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-StorytellerExplainSHAP","175":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Xml-Formatted-Templates","176":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JuliaExpertAskXML","177":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JuliaExpertCoTTaskXML","178":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JuliaExpertTestCodeXML","179":"/PromptingTools.jl/previews/PR218/reference_agenttools#Reference-for-AgentTools","180":"/PromptingTools.jl/previe
ws/PR218/reference_apitools#Reference-for-APITools","181":"/PromptingTools.jl/previews/PR218/reference#Reference","182":"/PromptingTools.jl/previews/PR218/reference_experimental#Reference-for-Experimental-Module","183":"/PromptingTools.jl/previews/PR218/reference_ragtools#Reference-for-RAGTools"},"fieldIds":{"title":0,"titles":1,"text":2},"fieldLength":{"0":[4,1,165],"1":[10,1,78],"2":[4,10,197],"3":[1,1,40],"4":[5,1,74],"5":[6,1,68],"6":[6,1,180],"7":[4,1,366],"8":[6,1,111],"9":[2,1,1],"10":[3,2,306],"11":[5,2,128],"12":[3,2,152],"13":[2,2,161],"14":[2,2,40],"15":[2,2,97],"16":[1,2,65],"17":[1,2,97],"18":[4,3,81],"19":[2,2,158],"20":[4,2,103],"21":[8,2,274],"22":[3,2,108],"23":[8,2,195],"24":[2,1,322],"25":[2,1,27],"26":[2,2,145],"27":[5,2,87],"28":[4,2,118],"29":[4,2,85],"30":[3,2,109],"31":[3,2,162],"32":[5,1,83],"33":[4,5,14],"34":[2,8,55],"35":[2,8,90],"36":[1,8,59],"37":[5,1,147],"38":[4,5,1],"39":[2,8,51],"40":[3,8,35],"41":[2,8,122],"42":[4,8,134],"43":[4,5,40],"44":[3,5,1],"45":[5,7,42],"46":[3,7,53],"47":[3,7,61],"48":[3,1,37],"49":[1,3,185],"50":[1,3,1],"51":[5,4,227],"52":[1,3,920],"53":[2,1,23],"54":[1,2,41],"55":[1,2,87],"56":[2,1,28],"57":[1,2,126],"58":[1,2,513],"59":[3,1,100],"60":[1,3,91],"61":[1,3,375],"62":[2,3,1],"63":[2,4,195],"64":[2,4,79],"65":[3,5,105],"66":[1,4,165],"67":[1,3,779],"68":[3,1,1],"69":[2,3,54],"70":[7,5,36],"71":[4,3,65],"72":[4,3,54],"73":[19,3,120],"74":[10,3,151],"75":[9,3,87],"76":[4,3,57],"77":[10,3,99],"78":[14,3,118],"79":[7,3,97],"80":[7,3,41],"81":[10,3,8],"82":[4,3,47],"83":[3,3,31],"84":[4,3,108],"85":[6,3,83],"86":[8,3,111],"87":[7,3,120],"88":[6,3,256],"89":[8,3,193],"90":[8,3,70],"91":[8,3,87],"92":[14,3,155],"93":[3,3,141],"94":[2,1,1],"95":[1,2,112],"96":[1,2,37],"97":[5,2,112],"98":[4,2,101],"99":[3,1,49],"100":[2,3,91],"101":[3,5,56],"102":[1,5,77],"103":[1,5,61],"104":[1,5,77],"105":[2,5,139],"106":[3,5,315],"107":[4,3,203],"108":[4,3,451],"109":[3,1,1],"110":[2,3,61],"111":[2,1,1],"112":[2,2,90],"113":[2,1,1],"114":[2,2,160],"115":[2,2,66],"116":[2,1,1],"117":[2,2,111],"118":[2,2,119],"119":[2,1,1],"120":[2,2,140],"121":[2,2,116],"122":[2,2,63],"123":[3,1,1],"124":[2,3,85],"125":[2,3,83],"126":[2,3,122],"127":[2,3,85],"128":[2,3,65],"129":[3,1,1],"130":[2,3,236],"131":[2,3,126],"132":[2,3,60],"133":[2,1,1],"134":[2,2,23],"135":[2,1,1],"136":[2,2,73],"137":[2,2,41],"138":[2,2,101],"139":[2,1,1],"140":[2,2,188],"141":[2,2,136],"142":[2,2,178],"143":[3,1,1],"144":[2,3,101],"145":[2,3,83],"146":[2,1,1],"147":[2,2,74],"148":[2,1,1],"149":[2,2,35],"150":[2,2,71],"151":[2,1,1],"152":[2,2,76],"153":[2,2,51],"154":[3,1,1],"155":[2,3,198],"156":[2,3,207],"157":[2,3,124],"158":[4,1,5],"159":[4,1,36],"160":[2,4,47],"161":[2,4,108],"162":[2,4,46],"163":[2,4,160],"164":[2,4,65],"165":[2,4,65],"166":[2,4,63],"167":[2,4,118],"168":[2,4,51],"169":[2,4,85],"170":[2,4,171],"171":[2,4,168],"172":[2,4,174],"173":[2,4,66],"174":[2,4,175],"175":[3,4,1],"176":[2,6,60],"177":[2,6,96],"178":[2,6,181],"179":[3,1,1183],"180":[3,1,101],"181":[1,1,2819],"182":[4,1,68],"183":[3,1,1818]},"averageFieldLength":[3.2500000000000013,2.8695652173913024,136.58152173913047],"storedFields":{"0":{"title":"Coverage of Model Providers","titles":[]},"1":{"title":"Building a Simple Retrieval-Augmented Generation (RAG) System with RAGTools","titles":[]},"2":{"title":"RAG in Two Lines","titles":["Building a Simple Retrieval-Augmented Generation (RAG) System with RAGTools"]},"3":{"title":"Evaluations","titles":[]},"4":{"title":"Generate Q&A 
pairs","titles":["Evaluations"]},"5":{"title":"Explore one Q&A pair","titles":["Evaluations"]},"6":{"title":"Evaluate this Q&A pair","titles":["Evaluations"]},"7":{"title":"Evaluate the Whole Set","titles":["Evaluations"]},"8":{"title":"What would we do next?","titles":[]},"9":{"title":"Various Examples","titles":[]},"10":{"title":"ai* Functions Overview","titles":["Various Examples"]},"11":{"title":"Seamless Integration Into Your Workflow","titles":["Various Examples"]},"12":{"title":"Advanced Prompts / Conversations","titles":["Various Examples"]},"13":{"title":"Templated Prompts","titles":["Various Examples"]},"14":{"title":"Asynchronous Execution","titles":["Various Examples"]},"15":{"title":"Model Aliases","titles":["Various Examples"]},"16":{"title":"Embeddings","titles":["Various Examples"]},"17":{"title":"Classification","titles":["Various Examples"]},"18":{"title":"Routing to Defined Categories","titles":["Various Examples","Classification"]},"19":{"title":"Data Extraction","titles":["Various Examples"]},"20":{"title":"OCR and Image Comprehension","titles":["Various Examples"]},"21":{"title":"Experimental Agent Workflows / Output Validation with airetry!","titles":["Various Examples"]},"22":{"title":"Using Ollama models","titles":["Various Examples"]},"23":{"title":"Using MistralAI API and other OpenAI-compatible APIs","titles":["Various Examples"]},"24":{"title":"Using AITemplates","titles":[]},"25":{"title":"Custom APIs","titles":[]},"26":{"title":"Using MistralAI","titles":["Custom APIs"]},"27":{"title":"Using other OpenAI-compatible APIs","titles":["Custom APIs"]},"28":{"title":"Using llama.cpp server","titles":["Custom APIs"]},"29":{"title":"Using Databricks Foundation Models","titles":["Custom APIs"]},"30":{"title":"Using Together.ai","titles":["Custom APIs"]},"31":{"title":"Using Fireworks.ai","titles":["Custom APIs"]},"32":{"title":"Working with Google AI Studio","titles":[]},"33":{"title":"Text Generation with aigenerate","titles":["Working with Google AI Studio"]},"34":{"title":"Simple message","titles":["Working with Google AI Studio","Text Generation with aigenerate"]},"35":{"title":"Advanced Prompts","titles":["Working with Google AI Studio","Text Generation with aigenerate"]},"36":{"title":"Gotchas","titles":["Working with Google AI Studio","Text Generation with aigenerate"]},"37":{"title":"Local models with Ollama.ai","titles":[]},"38":{"title":"Text Generation with aigenerate","titles":["Local models with Ollama.ai"]},"39":{"title":"Simple message","titles":["Local models with Ollama.ai","Text Generation with aigenerate"]},"40":{"title":"Standard string interpolation","titles":["Local models with Ollama.ai","Text Generation with aigenerate"]},"41":{"title":"Advanced Prompts","titles":["Local models with Ollama.ai","Text Generation with aigenerate"]},"42":{"title":"Schema Changes / Custom models","titles":["Local models with Ollama.ai","Text Generation with aigenerate"]},"43":{"title":"Providing Images with aiscan","titles":["Local models with Ollama.ai"]},"44":{"title":"Embeddings with aiembed","titles":["Local models with Ollama.ai"]},"45":{"title":"Simple embedding for one document","titles":["Local models with Ollama.ai","Embeddings with aiembed"]},"46":{"title":"Multiple documents embedding","titles":["Local models with Ollama.ai","Embeddings with aiembed"]},"47":{"title":"Using postprocessing function","titles":["Local models with Ollama.ai","Embeddings with aiembed"]},"48":{"title":"Agent Tools 
Introduction","titles":[]},"49":{"title":"Highlights","titles":["Agent Tools Introduction"]},"50":{"title":"Examples","titles":["Agent Tools Introduction"]},"51":{"title":"Automatic Fixing of AI Calls","titles":["Agent Tools Introduction","Examples"]},"52":{"title":"References","titles":["Agent Tools Introduction"]},"53":{"title":"APITools Introduction","titles":[]},"54":{"title":"Highlights","titles":["APITools Introduction"]},"55":{"title":"References","titles":["APITools Introduction"]},"56":{"title":"Text Utilities","titles":[]},"57":{"title":"Highlights","titles":["Text Utilities"]},"58":{"title":"References","titles":["Text Utilities"]},"59":{"title":"RAG Tools Introduction","titles":[]},"60":{"title":"Highlights","titles":["RAG Tools Introduction"]},"61":{"title":"Examples","titles":["RAG Tools Introduction"]},"62":{"title":"RAG Interface","titles":["RAG Tools Introduction"]},"63":{"title":"System Overview","titles":["RAG Tools Introduction","RAG Interface"]},"64":{"title":"RAG Diagram","titles":["RAG Tools Introduction","RAG Interface"]},"65":{"title":"Passing Keyword Arguments","titles":["RAG Tools Introduction","RAG Interface","RAG Diagram"]},"66":{"title":"Deepdive","titles":["RAG Tools Introduction","RAG Interface"]},"67":{"title":"References","titles":["RAG Tools Introduction"]},"68":{"title":"Frequently Asked Questions","titles":[]},"69":{"title":"Why OpenAI","titles":["Frequently Asked Questions"]},"70":{"title":"What if I cannot access OpenAI?","titles":["Frequently Asked Questions","Why OpenAI"]},"71":{"title":"Data Privacy and OpenAI","titles":["Frequently Asked Questions"]},"72":{"title":"Creating OpenAI API Key","titles":["Frequently Asked Questions"]},"73":{"title":"Getting an error "ArgumentError: api_key cannot be empty" despite having set OPENAI_API_KEY? {#Getting-an-error-"ArgumentError:-apikey-cannot-be-empty"-despite-having-set-OPENAIAPI_KEY?}","titles":["Frequently Asked Questions"]},"74":{"title":"Getting an error "Rate limit exceeded" from OpenAI?","titles":["Frequently Asked Questions"]},"75":{"title":"Getting the error "429 Too Many Requests"?","titles":["Frequently Asked Questions"]},"76":{"title":"Setting OpenAI Spending Limits","titles":["Frequently Asked Questions"]},"77":{"title":"How much does it cost? 
Is it worth paying for?","titles":["Frequently Asked Questions"]},"78":{"title":"How to try new OpenAI models if I\'m not Tier 5 customer?","titles":["Frequently Asked Questions"]},"79":{"title":"Configuring the Environment Variable for API Key","titles":["Frequently Asked Questions"]},"80":{"title":"Setting the API Key via Preferences.jl","titles":["Frequently Asked Questions"]},"81":{"title":"Understanding the API Keyword Arguments in aigenerate (api_kwargs)","titles":["Frequently Asked Questions"]},"82":{"title":"Instant Access from Anywhere","titles":["Frequently Asked Questions"]},"83":{"title":"Open Source Alternatives","titles":["Frequently Asked Questions"]},"84":{"title":"Setup Guide for Ollama","titles":["Frequently Asked Questions"]},"85":{"title":"Changing the Default Model or Schema","titles":["Frequently Asked Questions"]},"86":{"title":"Using Custom API Providers like Azure or Databricks","titles":["Frequently Asked Questions"]},"87":{"title":"How to have Multi-turn Conversations?","titles":["Frequently Asked Questions"]},"88":{"title":"How to have typed responses?","titles":["Frequently Asked Questions"]},"89":{"title":"How to quickly create a prompt template?","titles":["Frequently Asked Questions"]},"90":{"title":"Do we have a RecursiveCharacterTextSplitter like Langchain?","titles":["Frequently Asked Questions"]},"91":{"title":"How would I fine-tune a model?","titles":["Frequently Asked Questions"]},"92":{"title":"Can I see how my prompt is rendered / what is sent to the API?","titles":["Frequently Asked Questions"]},"93":{"title":"Automatic Logging / Tracing","titles":["Frequently Asked Questions"]},"94":{"title":"Getting Started","titles":[]},"95":{"title":"Prerequisites","titles":["Getting Started"]},"96":{"title":"Installation","titles":["Getting Started"]},"97":{"title":"Quick Start with @ai_str","titles":["Getting Started"]},"98":{"title":"Using aigenerate with placeholders","titles":["Getting Started"]},"99":{"title":"How It Works","titles":[]},"100":{"title":"Key Concepts","titles":["How It Works"]},"101":{"title":"API/Model Providers","titles":["How It Works","Key Concepts"]},"102":{"title":"Schemas","titles":["How It Works","Key Concepts"]},"103":{"title":"Prompts","titles":["How It Works","Key Concepts"]},"104":{"title":"Messages","titles":["How It Works","Key Concepts"]},"105":{"title":"Prompt Templates","titles":["How It Works","Key Concepts"]},"106":{"title":"ai* Functions Overview","titles":["How It Works","Key Concepts"]},"107":{"title":"Walkthrough Example for aigenerate","titles":["How It Works"]},"108":{"title":"Walkthrough Example for aiextract","titles":["How It Works"]},"109":{"title":"Basic-Rag Templates","titles":[]},"110":{"title":"Template: RAGAnswerFromContext","titles":["Basic-Rag Templates"]},"111":{"title":"Ranking Templates","titles":[]},"112":{"title":"Template: RAGRankGPT","titles":["Ranking Templates"]},"113":{"title":"Metadata Templates","titles":[]},"114":{"title":"Template: RAGExtractMetadataLong","titles":["Metadata Templates"]},"115":{"title":"Template: RAGExtractMetadataShort","titles":["Metadata Templates"]},"116":{"title":"Refinement Templates","titles":[]},"117":{"title":"Template: RAGAnswerRefiner","titles":["Refinement Templates"]},"118":{"title":"Template: RAGWebSearchRefiner","titles":["Refinement Templates"]},"119":{"title":"Evaluation Templates","titles":[]},"120":{"title":"Template: RAGCreateQAFromContext","titles":["Evaluation Templates"]},"121":{"title":"Template: RAGJudgeAnswerFromContext","titles":["Evaluation 
Templates"]},"122":{"title":"Template: RAGJudgeAnswerFromContextShort","titles":["Evaluation Templates"]},"123":{"title":"Query-Transformations Templates","titles":[]},"124":{"title":"Template: RAGJuliaQueryHyDE","titles":["Query-Transformations Templates"]},"125":{"title":"Template: RAGQueryHyDE","titles":["Query-Transformations Templates"]},"126":{"title":"Template: RAGQueryKeywordExpander","titles":["Query-Transformations Templates"]},"127":{"title":"Template: RAGQueryOptimizer","titles":["Query-Transformations Templates"]},"128":{"title":"Template: RAGQuerySimplifier","titles":["Query-Transformations Templates"]},"129":{"title":"Code-Fixing Templates","titles":[]},"130":{"title":"Template: CodeFixerRCI","titles":["Code-Fixing Templates"]},"131":{"title":"Template: CodeFixerShort","titles":["Code-Fixing Templates"]},"132":{"title":"Template: CodeFixerTiny","titles":["Code-Fixing Templates"]},"133":{"title":"Feedback Templates","titles":[]},"134":{"title":"Template: FeedbackFromEvaluator","titles":["Feedback Templates"]},"135":{"title":"Classification Templates","titles":[]},"136":{"title":"Template: InputClassifier","titles":["Classification Templates"]},"137":{"title":"Template: JudgeIsItTrue","titles":["Classification Templates"]},"138":{"title":"Template: QuestionRouter","titles":["Classification Templates"]},"139":{"title":"Critic Templates","titles":[]},"140":{"title":"Template: ChiefEditorTranscriptCritic","titles":["Critic Templates"]},"141":{"title":"Template: GenericTranscriptCritic","titles":["Critic Templates"]},"142":{"title":"Template: JuliaExpertTranscriptCritic","titles":["Critic Templates"]},"143":{"title":"Xml-Formatted Templates","titles":[]},"144":{"title":"Template: ExtractDataCoTXML","titles":["Xml-Formatted Templates"]},"145":{"title":"Template: ExtractDataXML","titles":["Xml-Formatted Templates"]},"146":{"title":"Extraction Templates","titles":[]},"147":{"title":"Template: ExtractData","titles":["Extraction Templates"]},"148":{"title":"General Templates","titles":[]},"149":{"title":"Template: BlankSystemUser","titles":["General Templates"]},"150":{"title":"Template: PromptEngineerForTask","titles":["General Templates"]},"151":{"title":"Visual Templates","titles":[]},"152":{"title":"Template: BlogTitleImageGenerator","titles":["Visual Templates"]},"153":{"title":"Template: OCRTask","titles":["Visual Templates"]},"154":{"title":"Persona-Task Templates","titles":[]},"155":{"title":"Template: AnalystChaptersInTranscript","titles":["Persona-Task Templates"]},"156":{"title":"Template: AnalystDecisionsInTranscript","titles":["Persona-Task Templates"]},"157":{"title":"Template: AnalystThemesInResponses","titles":["Persona-Task Templates"]},"158":{"title":"Theme 1: [Theme Description]","titles":[]},"159":{"title":"Theme 2: [Theme Description]","titles":[]},"160":{"title":"Template: AssistantAsk","titles":["Theme 2: [Theme Description]"]},"161":{"title":"Template: ConversationLabeler","titles":["Theme 2: [Theme Description]"]},"162":{"title":"Template: DetailOrientedTask","titles":["Theme 2: [Theme Description]"]},"163":{"title":"Template: DrafterEmailBrief","titles":["Theme 2: [Theme Description]"]},"164":{"title":"Template: GenericTopicExpertAsk","titles":["Theme 2: [Theme Description]"]},"165":{"title":"Template: GenericWriter","titles":["Theme 2: [Theme Description]"]},"166":{"title":"Template: JavaScriptExpertAsk","titles":["Theme 2: [Theme Description]"]},"167":{"title":"Template: JuliaBlogWriter","titles":["Theme 2: [Theme Description]"]},"168":{"title":"Template: 
JuliaExpertAsk","titles":["Theme 2: [Theme Description]"]},"169":{"title":"Template: JuliaExpertCoTTask","titles":["Theme 2: [Theme Description]"]},"170":{"title":"Template: JuliaExpertTestCode","titles":["Theme 2: [Theme Description]"]},"171":{"title":"Template: JuliaRecapCoTTask","titles":["Theme 2: [Theme Description]"]},"172":{"title":"Template: JuliaRecapTask","titles":["Theme 2: [Theme Description]"]},"173":{"title":"Template: LinuxBashExpertAsk","titles":["Theme 2: [Theme Description]"]},"174":{"title":"Template: StorytellerExplainSHAP","titles":["Theme 2: [Theme Description]"]},"175":{"title":"Xml-Formatted Templates","titles":["Theme 2: [Theme Description]"]},"176":{"title":"Template: JuliaExpertAskXML","titles":["Theme 2: [Theme Description]","Xml-Formatted Templates"]},"177":{"title":"Template: JuliaExpertCoTTaskXML","titles":["Theme 2: [Theme Description]","Xml-Formatted Templates"]},"178":{"title":"Template: JuliaExpertTestCodeXML","titles":["Theme 2: [Theme Description]","Xml-Formatted Templates"]},"179":{"title":"Reference for AgentTools","titles":[]},"180":{"title":"Reference for APITools","titles":[]},"181":{"title":"Reference","titles":[]},"182":{"title":"Reference for Experimental Module","titles":[]},"183":{"title":"Reference for RAGTools","titles":[]}},"dirtCount":0,"index":[["θ",{"2":{"179":1}}],["β",{"2":{"179":1}}],["α",{"2":{"179":2}}],["→",{"2":{"108":1}}],["zoom",{"2":{"108":1}}],["zshrc",{"2":{"79":1}}],["zero",{"2":{"58":1,"170":1,"178":1,"181":4}}],["~300",{"2":{"181":3}}],["~0",{"2":{"77":1}}],["~",{"2":{"73":1,"79":1,"82":1}}],["~word",{"2":{"67":1,"183":1}}],["~words",{"2":{"67":1,"183":1}}],["^",{"2":{"58":2,"181":2}}],["÷",{"2":{"52":1,"67":1,"179":1,"183":1}}],["├─",{"2":{"52":9,"179":11}}],["👋",{"2":{"181":1}}],["😊",{"2":{"42":1}}],["😃",{"2":{"2":1,"52":1,"179":1}}],["905",{"2":{"181":1}}],["909",{"2":{"161":1}}],["93",{"2":{"179":1}}],["911",{"2":{"108":2}}],["911t",{"2":{"88":2}}],["94",{"2":{"52":1,"179":1}}],["9999999999999982",{"2":{"47":1}}],["99",{"2":{"28":1,"179":1}}],["9",{"2":{"22":1,"23":1,"26":1,"31":1,"181":2,"183":4}}],["9examples",{"2":{"7":1}}],["|im",{"2":{"181":4}}],["|",{"2":{"21":2,"51":2,"52":1,"179":1}}],["|>",{"2":{"13":3,"20":1,"24":1,"51":1,"52":4,"58":1,"88":2,"93":4,"106":1,"179":5,"181":16,"183":1}}],["y`",{"2":{"171":1,"172":1}}],["yarrr",{"2":{"89":2,"181":2}}],["yay",{"2":{"52":1,"179":1}}],["y",{"2":{"52":2,"82":1,"179":4,"181":2,"183":2}}],["years",{"2":{"161":1}}],["yes",{"2":{"41":1,"90":1,"92":1,"98":1}}],["yedi",{"2":{"35":1,"41":2,"181":5}}],["yet",{"2":{"32":1,"52":3,"78":1,"107":1,"141":1,"163":1,"179":2,"181":6,"182":1,"183":2}}],["yellow",{"2":{"21":2,"51":3,"52":5,"179":5}}],["york",{"2":{"181":10}}],["yoda",{"2":{"12":2,"35":1,"41":1,"181":5}}],["youtube",{"2":{"155":1,"156":1}}],["young",{"2":{"12":1,"35":1,"41":1,"181":1}}],["yours",{"2":{"181":13}}],["yourself",{"2":{"41":2,"181":1}}],["your",{"0":{"11":1},"2":{"2":4,"4":1,"8":1,"10":1,"11":3,"12":1,"13":3,"15":1,"22":1,"23":3,"24":9,"26":1,"27":1,"28":1,"29":1,"32":2,"35":2,"37":2,"41":1,"42":1,"52":3,"57":2,"59":1,"60":1,"63":1,"64":1,"67":5,"71":5,"72":1,"73":2,"74":2,"75":4,"76":1,"77":1,"78":2,"79":6,"80":2,"82":1,"84":2,"85":3,"86":4,"87":2,"89":2,"91":1,"92":1,"93":2,"95":5,"96":1,"97":2,"100":1,"102":1,"104":1,"105":4,"106":1,"107":4,"108":3,"117":1,"118":1,"121":1,"124":1,"125":1,"127":2,"130":6,"131":1,"136":1,"138":1,"140":1,"141":2,"142":1,"144":1,"155":3,"156":4,"157":1,"160":2,"161":1,"162":1,"163":1,"164":5,"166":5,"168":2,"169":4,"170":3,"17
3":5,"174":2,"176":2,"177":4,"178":3,"179":3,"181":54,"183":13}}],["you",{"2":{"0":1,"1":2,"2":3,"4":2,"5":1,"7":6,"8":1,"10":9,"11":9,"12":11,"13":7,"14":2,"15":2,"16":2,"17":3,"18":1,"19":6,"20":4,"21":6,"22":5,"23":12,"24":32,"25":1,"26":9,"27":3,"28":5,"29":5,"30":9,"31":9,"32":2,"33":1,"34":4,"35":3,"37":4,"39":6,"40":2,"41":10,"42":9,"43":1,"46":3,"49":1,"51":4,"52":25,"54":1,"55":1,"57":2,"58":5,"59":1,"60":2,"61":7,"63":7,"65":4,"67":19,"69":1,"71":2,"72":1,"73":6,"74":11,"75":5,"76":3,"77":9,"78":4,"79":5,"80":2,"82":1,"83":2,"84":7,"85":5,"86":6,"87":4,"88":5,"89":21,"91":2,"92":3,"93":12,"95":5,"96":1,"97":3,"98":3,"100":4,"101":3,"102":1,"103":3,"104":1,"105":8,"106":8,"107":8,"108":19,"110":2,"112":1,"114":2,"115":2,"117":3,"118":3,"120":1,"126":2,"128":1,"130":4,"131":2,"132":2,"136":2,"137":1,"138":3,"140":1,"155":3,"156":1,"157":1,"159":1,"160":2,"162":1,"163":1,"164":1,"165":3,"166":1,"167":4,"168":2,"169":3,"170":3,"172":1,"173":1,"174":1,"176":2,"177":2,"178":3,"179":24,"180":1,"181":211,"183":36}}],["└─",{"2":{"52":9,"179":13}}],["└",{"2":{"11":1}}],["┌",{"2":{"11":1}}],["70",{"2":{"181":7}}],["70b",{"2":{"29":3}}],["72",{"2":{"181":6}}],["74",{"2":{"98":1}}],["754",{"2":{"138":1}}],["75",{"2":{"67":1,"183":1}}],["77",{"2":{"52":1,"179":1}}],["786",{"2":{"131":1}}],["78",{"2":{"31":1}}],["787",{"2":{"16":1,"181":1}}],["7",{"2":{"11":1,"52":7,"61":1,"163":1,"179":8,"181":2,"183":4}}],["7examples",{"2":{"7":1}}],["`top",{"2":{"183":1}}],["`test",{"2":{"67":1,"183":1}}],["`textchunker",{"2":{"67":1,"183":1}}],["`build",{"2":{"183":1}}],["`begin`",{"2":{"171":1,"172":1}}],["`1",{"2":{"181":1}}],["`1+1`",{"2":{"181":5}}],["`2`",{"2":{"181":5}}],["`empty",{"2":{"181":2}}],["`error`",{"2":{"181":2}}],["`end`",{"2":{"171":1,"172":1}}],["`example`",{"2":{"24":2}}],["`$`",{"2":{"171":1,"172":1}}],["`$a+$a`",{"2":{"40":1,"181":6}}],["`while`",{"2":{"171":1,"172":1}}],["`function`",{"2":{"171":1,"172":1}}],["`function",{"2":{"171":1,"172":1}}],["`for`",{"2":{"171":1,"172":1}}],["`false`",{"2":{"108":1}}],["`fahrenheit`",{"2":{"19":1}}],["`image",{"2":{"181":2}}],["`isx",{"2":{"171":1,"172":1}}],["`if",{"2":{"171":1,"172":1}}],["`index`",{"2":{"67":2,"183":3}}],["`innerjoin`",{"2":{"7":1}}],["`x",{"2":{"171":2,"172":2}}],["`other",{"2":{"156":1}}],["`out",{"2":{"52":1,"179":1}}],["`dict",{"2":{"171":1,"172":1}}],["`distributed`",{"2":{"61":1}}],["`data`",{"2":{"147":1}}],["`register",{"2":{"181":1}}],["`return",{"2":{"106":1}}],["`run",{"2":{"179":1}}],["`ragresult`",{"2":{"67":1,"183":1}}],["`you",{"2":{"104":1}}],["`score",{"2":{"183":1}}],["`schema",{"2":{"108":1}}],["`schema`",{"2":{"27":1,"28":1}}],["`streamcallback",{"2":{"181":3}}],["`success",{"2":{"52":1,"179":1}}],["`maybeextract",{"2":{"181":1}}],["`map`",{"2":{"74":1}}],["`model",{"2":{"181":1}}],["`model`",{"2":{"28":1}}],["`message`",{"2":{"181":2}}],["`msg",{"2":{"37":1}}],["`processor`",{"2":{"183":1}}],["`pt",{"2":{"27":1,"28":1}}],["`pkg`",{"2":{"24":1}}],["`usermessage`",{"2":{"52":1,"179":1}}],["`using`",{"2":{"24":1}}],["`unit`",{"2":{"19":1}}],["`local",{"2":{"181":1}}],["`location`",{"2":{"19":1}}],["`last",{"2":{"21":2,"51":2,"52":2,"179":2}}],["`number`",{"2":{"171":1,"172":1}}],["`nothing`",{"2":{"108":1}}],["`n",{"2":{"21":1,"51":1,"52":1,"179":1}}],["`condition`",{"2":{"108":1}}],["`convert`",{"2":{"108":1}}],["`conversation`",{"2":{"52":1,"106":1,"179":1}}],["`config",{"2":{"52":1,"179":1}}],["`config`",{"2":{"21":1,"51":1,"52":1,"179":1}}],["`celsius`",{"2":{"19":1}}],["`abstractstring`",{"2":{"171":1,"1
72":1}}],["`a",{"2":{"171":1,"172":1}}],["`answerer",{"2":{"65":1}}],["`answerer`",{"2":{"65":1}}],["`answer",{"2":{"65":1}}],["`add`",{"2":{"24":1,"170":1,"178":1}}],["`aigenerate",{"2":{"52":1,"179":1}}],["`aicall`",{"2":{"21":1,"51":1,"52":2,"179":2}}],["`airag`",{"2":{"6":1,"67":1,"183":1}}],["`api",{"2":{"21":1,"37":1,"51":1,"52":1,"179":1}}],["`ask`",{"2":{"13":1,"24":2,"181":2}}],["``",{"2":{"13":1,"181":1}}],["```plaintext",{"2":{"131":1,"132":1}}],["````",{"2":{"52":1,"179":1}}],["```julia",{"2":{"24":2,"58":1,"130":2,"131":1,"170":2,"178":2,"179":1,"181":3,"183":1}}],["```sql",{"2":{"20":1,"181":2}}],["```",{"2":{"11":2,"24":2,"52":1,"121":1,"122":1,"130":1,"131":1,"132":1,"155":2,"156":2,"170":2,"178":2,"179":2,"183":1}}],["`",{"2":{"11":2,"21":1,"24":2,"27":1,"28":1,"37":2,"51":1,"52":2,"61":1,"65":1,"67":1,"108":2,"130":2,"167":2,"170":6,"171":8,"172":8,"178":6,"179":3,"181":6,"183":1}}],["│",{"2":{"7":12,"11":5,"52":14,"179":16}}],["$date",{"2":{"181":2}}],["$location",{"2":{"181":2}}],["$lower",{"2":{"52":1,"179":1}}],["$f",{"2":{"108":1}}],["$25",{"2":{"108":1}}],["$10",{"2":{"76":1}}],["$50",{"2":{"115":1}}],["$5",{"2":{"76":1}}],["$user",{"2":{"52":1,"179":1}}],["$upper",{"2":{"52":1,"179":1}}],["$",{"2":{"7":3,"52":9,"58":7,"88":1,"97":1,"108":1,"179":9,"181":7}}],["$0",{"2":{"4":1,"11":1,"20":2,"23":1,"26":1,"30":1,"31":1,"61":1,"87":1,"97":2,"98":1,"181":5}}],[">0",{"2":{"183":1}}],[">tryparse",{"2":{"88":1}}],[">x",{"2":{"7":2,"183":1}}],[">",{"2":{"7":1,"13":1,"18":3,"21":1,"31":1,"46":1,"51":1,"52":8,"58":5,"61":1,"65":1,"67":1,"97":1,"108":2,"112":2,"130":2,"155":2,"156":3,"171":1,"172":1,"179":9,"181":28,"183":8}}],["x123",{"2":{"181":2}}],["x^2`",{"2":{"171":1,"172":1}}],["xml",{"0":{"143":1,"175":1},"1":{"144":1,"145":1,"176":1,"177":1,"178":1},"2":{"144":1,"145":1,"176":1,"177":1,"178":1,"181":1}}],["x3c",{"2":{"20":1,"27":1,"52":6,"58":13,"61":1,"63":1,"67":5,"88":5,"130":4,"144":4,"145":4,"155":3,"156":3,"163":2,"170":1,"176":2,"177":12,"178":17,"179":11,"181":163,"183":116}}],["xyz",{"2":{"11":3,"52":1,"77":1,"106":2,"179":3,"183":1}}],["x",{"2":{"7":4,"21":2,"46":2,"51":2,"52":6,"74":4,"82":1,"88":2,"171":2,"172":2,"179":17,"181":11,"183":11}}],["x26",{"2":{"4":1,"58":2,"120":1,"181":12,"183":2}}],["08",{"2":{"181":2}}],["02",{"2":{"181":1}}],["024",{"2":{"7":1}}],["07",{"2":{"181":1}}],["03",{"2":{"181":5}}],["05",{"2":{"179":1,"181":7}}],["0s",{"2":{"179":1}}],["0011",{"2":{"181":1}}],["0015",{"2":{"181":3}}],["002",{"2":{"181":3}}],["000",{"2":{"58":2,"67":1,"74":3,"181":2,"183":10}}],["0001",{"2":{"30":1,"31":1,"77":2,"97":1,"98":1}}],["0002",{"2":{"11":1}}],["0045",{"2":{"20":1,"181":1}}],["0117",{"2":{"20":1,"181":2}}],["014",{"2":{"7":7}}],["015",{"2":{"7":2}}],["0dict",{"2":{"7":3}}],["0",{"2":{"6":2,"10":2,"16":2,"19":1,"22":1,"23":2,"26":2,"31":1,"32":1,"33":1,"42":1,"47":2,"52":8,"57":2,"58":4,"61":6,"67":24,"77":1,"84":2,"87":2,"89":2,"97":2,"106":1,"110":1,"115":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"131":1,"132":1,"134":1,"138":1,"140":1,"141":1,"142":1,"144":1,"145":1,"152":1,"161":1,"164":1,"165":1,"166":1,"167":1,"169":1,"170":3,"172":1,"173":1,"174":1,"177":1,"178":4,"179":29,"181":66,"183":84}}],["3rd",{"2":{"181":7}}],["39931",{"2":{"179":2}}],["390",{"2":{"124":1}}],["383",{"2":{"165":1}}],["31",{"2":{"97":1}}],["344",{"2":{"166":1}}],["34",{"2":{"61":1,"181":1}}],["34900",{"2":{"58":2,"181":2}}],["34315",{"2":{"52":1,"179":1}}],["374",{"2":{"173":1}}],["375",{"2":{"110":1}}],["37581",{"2":{"52":1,"179":1}}],["37",{"2":{"52":
1,"179":1}}],["354",{"2":{"125":1}}],["35",{"2":{"52":4,"58":2,"179":4,"181":2}}],["35603",{"2":{"52":1,"179":1}}],["32000",{"2":{"179":3}}],["32991",{"2":{"52":5,"179":5}}],["32",{"2":{"52":2,"97":1,"179":2}}],["337",{"2":{"164":1}}],["33",{"2":{"52":5,"179":5}}],["33333dict",{"2":{"7":1}}],["3x",{"2":{"52":1,"179":1}}],["366",{"2":{"136":1}}],["36",{"2":{"52":1,"179":1}}],["362",{"2":{"20":1,"181":1}}],["3000",{"2":{"170":1,"178":1}}],["300",{"2":{"167":1}}],["30088",{"2":{"52":2,"179":2}}],["30",{"2":{"19":2,"77":1,"124":1,"125":1,"181":15,"183":2}}],["3examples",{"2":{"7":1}}],["3",{"2":{"6":2,"7":11,"10":1,"11":1,"24":1,"37":1,"52":3,"58":3,"61":4,"74":1,"90":1,"93":1,"97":1,"98":3,"106":1,"108":2,"114":3,"115":2,"130":4,"131":1,"157":3,"161":1,"163":1,"170":2,"171":1,"172":1,"178":2,"179":8,"181":26,"183":5}}],["+",{"2":{"6":1,"52":2,"61":2,"63":1,"66":1,"67":1,"108":2,"170":1,"178":1,"179":2,"181":5,"183":1}}],["5th",{"2":{"183":1}}],["595",{"2":{"177":1}}],["570",{"2":{"144":1}}],["57694",{"2":{"52":1,"179":1}}],["519",{"2":{"145":1,"169":1}}],["514",{"2":{"127":1}}],["512",{"2":{"52":5,"179":5,"183":1}}],["5=best",{"2":{"122":1}}],["50m",{"2":{"181":2}}],["504",{"2":{"152":1}}],["500",{"2":{"147":1,"183":2}}],["50086",{"2":{"52":4,"179":4}}],["50",{"2":{"52":4,"87":1,"179":4}}],["52910",{"2":{"52":4,"179":4}}],["55394",{"2":{"52":1,"179":1}}],["5examples",{"2":{"7":1}}],["5",{"0":{"78":1},"2":{"6":5,"11":2,"20":1,"22":3,"28":1,"30":2,"31":1,"37":3,"40":1,"47":1,"52":11,"55":1,"61":3,"65":2,"67":11,"74":1,"75":1,"78":2,"84":2,"87":1,"93":1,"95":1,"97":3,"100":1,"104":1,"107":1,"114":2,"115":1,"121":14,"122":2,"126":1,"157":2,"161":2,"163":2,"170":1,"174":1,"178":1,"179":11,"180":1,"181":50,"183":25}}],["837",{"2":{"181":1}}],["8k",{"2":{"181":1}}],["84",{"2":{"179":1}}],["886",{"2":{"167":1}}],["82",{"2":{"52":1,"179":1}}],["87",{"2":{"30":1}}],["8755f69180b7ac7ee76a69ae68ec36872a116ad4",{"2":{"20":1,"181":2}}],["8x7b",{"2":{"28":1,"37":1,"108":1}}],["80k",{"2":{"183":3}}],["80kg",{"2":{"19":1,"181":7}}],["8080",{"2":{"28":1,"65":3,"67":3,"181":2,"183":3}}],["8081",{"2":{"23":1,"181":2}}],["80",{"2":{"19":1,"181":4,"183":6}}],["8examples",{"2":{"7":1}}],["8",{"2":{"6":1,"52":1,"179":1,"183":4}}],["64",{"2":{"183":2}}],["636",{"2":{"112":1}}],["60",{"2":{"52":3,"74":1,"77":1,"181":6,"183":2}}],["67",{"2":{"52":10,"179":11}}],["67dict",{"2":{"7":3}}],["69",{"2":{"22":1,"181":2}}],["66667dict",{"2":{"7":3}}],["6examples",{"2":{"7":1}}],["6",{"2":{"6":1,"7":1,"42":1,"52":6,"58":1,"74":1,"179":7,"181":4,"183":5}}],["48",{"2":{"179":1}}],["48343",{"2":{"52":1,"179":1}}],["420",{"2":{"122":1,"152":1}}],["429",{"0":{"75":1}}],["45f9b43becbd31601223824d0459e46e7d38b0d1",{"2":{"58":1,"181":6,"183":1}}],["4k",{"2":{"58":1,"181":1}}],["46",{"2":{"89":1,"181":1}}],["46632",{"2":{"52":1,"179":1}}],["46839",{"2":{"52":2,"179":2}}],["43094",{"2":{"52":1,"179":1}}],["43",{"2":{"52":1,"179":1}}],["44816",{"2":{"52":2,"179":2}}],["41",{"2":{"52":1,"134":1,"179":1}}],["4examples",{"2":{"7":1}}],["402",{"2":{"150":1}}],["40796033843072876",{"2":{"47":1}}],["4096×2",{"2":{"22":1,"46":1,"181":1}}],["4096",{"2":{"22":1,"45":2,"46":1,"47":1,"181":2}}],["40",{"2":{"7":8,"61":1,"181":2}}],["4",{"2":{"6":3,"7":2,"15":5,"23":1,"24":1,"26":1,"52":10,"58":1,"61":3,"93":3,"97":1,"98":1,"114":1,"157":1,"161":1,"179":10,"181":10,"183":14}}],["q4",{"2":{"28":1,"37":1}}],["qaevalresult",{"2":{"6":1,"183":5}}],["qaevalitems",{"2":{"183":1}}],["qaevalitem",{"2":{"4":1,"5":1,"67":5,"183":13}}],["qa",{"2":{"4":1,"6":2,"7"
:5,"60":1,"67":8,"181":3,"183":38}}],["q",{"0":{"4":1,"5":1,"6":1},"2":{"3":1,"4":1,"6":1,"67":4,"183":4}}],["quantization",{"2":{"161":1,"183":6}}],["quantum",{"2":{"61":1}}],["quarter",{"2":{"67":1,"183":1}}],["quality=",{"2":{"181":1}}],["quality`",{"2":{"181":1}}],["quality",{"2":{"3":2,"5":1,"7":1,"8":1,"13":2,"17":1,"24":3,"90":1,"105":1,"107":2,"121":2,"122":2,"155":1,"160":1,"164":1,"166":1,"168":1,"173":1,"176":1,"181":6,"183":2}}],["queried",{"2":{"181":2}}],["queries",{"2":{"136":1}}],["query",{"0":{"123":1},"1":{"124":1,"125":1,"126":1,"127":1,"128":1},"2":{"55":3,"58":7,"66":3,"112":4,"117":7,"118":8,"124":7,"125":5,"126":13,"127":11,"128":8,"180":3,"181":15,"183":35}}],["question=",{"2":{"183":4}}],["question>",{"2":{"176":2}}],["questionrouter",{"0":{"138":1},"2":{"181":1}}],["questions",{"0":{"68":1},"1":{"69":1,"70":1,"71":1,"72":1,"73":1,"74":1,"75":1,"76":1,"77":1,"78":1,"79":1,"80":1,"81":1,"82":1,"83":1,"84":1,"85":1,"86":1,"87":1,"88":1,"89":1,"90":1,"91":1,"92":1,"93":1},"2":{"3":1,"4":1,"13":1,"15":1,"23":1,"24":2,"26":1,"31":1,"34":1,"37":1,"42":1,"58":1,"61":1,"67":3,"97":1,"110":1,"124":2,"160":1,"164":1,"166":1,"168":1,"173":1,"176":1,"181":5,"183":8}}],["question",{"2":{"2":5,"5":1,"6":4,"7":2,"8":1,"13":1,"23":1,"24":7,"26":1,"60":2,"61":8,"64":2,"65":3,"67":30,"77":1,"103":1,"104":1,"105":2,"107":2,"110":5,"112":4,"117":1,"118":1,"120":7,"121":8,"122":6,"125":1,"138":7,"155":1,"156":1,"157":4,"159":2,"160":1,"164":1,"166":1,"168":1,"173":1,"181":6,"183":74}}],["quirks",{"2":{"78":1,"88":1}}],["quicker",{"2":{"179":1}}],["quick",{"0":{"97":1},"2":{"29":1,"73":1,"89":2,"95":1,"163":1,"181":6}}],["quickly",{"0":{"89":1},"2":{"11":1,"58":1,"59":1,"155":1,"181":1,"183":1}}],["quite",{"2":{"28":1,"66":1,"74":1,"90":1,"108":1,"183":1}}],["quote",{"2":{"157":1}}],["quotes",{"2":{"130":3,"155":1,"183":1}}],["quota",{"2":{"75":1}}],["quot",{"0":{"73":4,"74":2,"75":2},"2":{"1":4,"2":2,"6":2,"7":8,"10":8,"11":6,"13":4,"15":10,"17":8,"18":2,"21":6,"24":2,"28":2,"29":2,"30":2,"31":2,"32":2,"33":2,"34":2,"37":4,"42":10,"49":6,"51":6,"52":26,"55":6,"58":52,"61":2,"63":2,"67":20,"73":8,"74":4,"75":4,"77":2,"78":4,"79":4,"80":6,"82":2,"84":2,"85":10,"86":6,"87":6,"88":8,"90":16,"91":6,"92":4,"93":2,"95":6,"97":4,"98":2,"100":2,"101":4,"102":6,"104":8,"105":6,"106":8,"107":6,"108":16,"114":2,"115":2,"120":2,"134":2,"155":2,"156":2,"157":2,"158":2,"159":2,"170":2,"172":2,"174":2,"178":2,"179":64,"180":6,"181":292,"183":41}}],["=context",{"2":{"183":1}}],["=1",{"2":{"183":1}}],["=template",{"2":{"181":2}}],["=pt",{"2":{"181":3}}],["=prompt",{"2":{"181":1}}],["=url",{"2":{"181":1}}],["=user",{"2":{"149":1}}],["=system",{"2":{"149":1,"181":1}}],["=main",{"2":{"52":1,"181":1}}],["=nothing",{"2":{"52":2,"179":4,"183":2}}],["==true",{"2":{"181":1}}],["==",{"2":{"21":2,"51":3,"52":4,"88":1,"170":4,"178":4,"179":13,"181":4,"183":2}}],["=wordcount",{"2":{"13":1,"181":2}}],["=>dict",{"2":{"108":6}}],["=>",{"2":{"6":8,"7":1,"88":3,"92":4,"107":4,"108":16,"171":1,"172":1,"181":41,"183":3}}],["=",{"2":{"1":2,"2":6,"4":4,"6":8,"7":13,"10":1,"12":6,"13":7,"14":2,"15":3,"16":4,"17":2,"18":4,"19":2,"20":3,"21":14,"22":5,"23":7,"24":8,"25":1,"26":3,"27":5,"28":1,"29":9,"30":1,"31":3,"32":1,"34":2,"35":2,"37":2,"39":2,"40":5,"41":2,"42":8,"43":1,"45":2,"46":5,"47":3,"48":1,"51":14,"52":68,"55":4,"58":28,"59":1,"61":8,"63":2,"65":28,"67":146,"78":3,"79":2,"80":1,"82":1,"85":2,"86":1,"87":2,"88":11,"90":5,"92":9,"93":6,"95":2,"97":1,"98":1,"102":1,"105":1,"106":3,"107":10,"108":20,"169":1,"170":1,"17
1":2,"172":1,"177":1,"178":1,"179":134,"180":7,"181":667,"183":423}}],["jxnl",{"2":{"155":1,"156":1}}],["javascript",{"2":{"166":2}}],["javascriptexpertask",{"0":{"166":1}}],["jargon",{"2":{"126":1}}],["jarvislabs",{"2":{"91":1}}],["jack",{"2":{"19":1,"89":4,"181":9}}],["james",{"2":{"19":1,"181":9}}],["jane",{"2":{"7":4,"114":2}}],["jedi",{"2":{"12":3,"35":1,"181":1}}],["joy",{"2":{"41":1}}],["job",{"2":{"7":4,"77":1,"181":5}}],["job=",{"2":{"7":1}}],["jobs",{"2":{"7":7}}],["john",{"2":{"7":3,"40":2,"87":6,"92":2}}],["joint",{"2":{"183":1}}],["join",{"2":{"6":3,"7":23,"108":2,"181":3}}],["joining",{"2":{"5":2,"6":2,"7":2,"70":1}}],["joins",{"2":{"2":1,"5":3,"6":1,"7":15,"183":1}}],["joinpath",{"2":{"2":2,"24":1,"181":3}}],["jsonl",{"2":{"91":2,"181":1}}],["json",{"2":{"4":2,"24":2,"89":1,"91":1,"93":1,"107":1,"108":14,"181":40}}],["json3",{"2":{"1":1,"4":2,"22":1,"45":1,"108":7,"181":3}}],["jump",{"2":{"179":1,"181":1}}],["judgment",{"2":{"163":1}}],["judging",{"2":{"121":1,"183":1}}],["judge=",{"2":{"183":1}}],["judgerating",{"2":{"181":1,"183":2}}],["judgeisittrue",{"0":{"137":1},"2":{"13":1,"17":2,"181":7}}],["judgeallscores",{"2":{"6":1,"181":1,"183":2}}],["judged",{"2":{"6":2}}],["judge",{"2":{"2":1,"6":3,"7":2,"10":1,"17":1,"52":1,"106":1,"121":4,"122":3,"137":1,"179":1,"181":3,"183":8}}],["juicy",{"2":{"31":2,"108":8}}],["just",{"2":{"2":1,"10":1,"13":1,"23":1,"24":4,"26":1,"31":1,"52":1,"55":1,"73":1,"75":1,"82":1,"84":2,"87":1,"88":1,"105":1,"106":1,"108":4,"110":1,"117":1,"118":1,"138":1,"161":2,"179":1,"180":1,"181":13}}],["juliaqa",{"2":{"183":1}}],["juliaquestion",{"2":{"61":1}}],["juliakwargs",{"2":{"183":1}}],["juliakw",{"2":{"183":3}}],["juliakeywordsprocessor",{"2":{"183":1}}],["juliakeywordsindexer",{"2":{"183":1}}],["julianotagger",{"2":{"183":1}}],["julianotagfilter",{"2":{"183":1}}],["julianoreranker",{"2":{"183":1}}],["julianorephraser",{"2":{"183":1}}],["julianorefiner",{"2":{"183":1}}],["julianoprocessor",{"2":{"183":1}}],["julianopostprocessor",{"2":{"183":1}}],["julianoembedder",{"2":{"183":1}}],["julianew",{"2":{"12":1,"181":1}}],["juliahcat",{"2":{"183":1}}],["juliahamming",{"2":{"183":1}}],["juliahandle",{"2":{"181":1}}],["juliahyderephraser",{"2":{"183":1}}],["juliahtmlstyler",{"2":{"183":1}}],["juliais",{"2":{"181":1}}],["juliainitialize",{"2":{"181":1}}],["juliaindex",{"2":{"61":1,"67":2,"183":6}}],["juliaweather",{"2":{"181":1}}],["juliawrap",{"2":{"58":2,"181":7}}],["juliagetpropertynested",{"2":{"183":1}}],["juliaget",{"2":{"181":1,"183":7}}],["juliagenerate",{"2":{"67":1,"181":1,"183":1}}],["juliagroqopenaischema",{"2":{"181":1}}],["juliagamma",{"2":{"179":1}}],["juliabin",{"2":{"183":2}}],["juliabinary",{"2":{"183":1}}],["juliabinarycosinesimilarity",{"2":{"183":1}}],["juliabinarybatchembedder",{"2":{"183":1}}],["juliabitpacked",{"2":{"183":1}}],["juliabitpackedcosinesimilarity",{"2":{"183":1}}],["juliabitpackedbatchembedder",{"2":{"183":1}}],["juliabatchembedder",{"2":{"183":1}}],["juliabm25similarity",{"2":{"183":1}}],["juliabeta",{"2":{"179":1}}],["juliablogwriter",{"0":{"167":1}}],["juliabuild",{"2":{"67":3,"181":3,"183":5}}],["juliaollama",{"2":{"181":1}}],["juliaopentagger",{"2":{"183":1}}],["juliaopenai",{"2":{"181":3}}],["juliaopenrouteropenaischema",{"2":{"181":1}}],["juliaobj",{"2":{"108":1}}],["juliaoutput",{"2":{"51":1}}],["juliaout",{"2":{"21":1,"52":2,"179":2}}],["juliaupdate",{"2":{"181":1}}],["juliaunique",{"2":{"181":1}}],["juliaunwrap",{"2":{"93":1}}],["juliausermessagewithimages",{"2":{"181":1}}],["juliausermessage",{"2":{"181":1}}],[
"juliausing",{"2":{"1":1,"10":1,"12":1,"13":1,"16":1,"20":1,"21":1,"24":1,"25":1,"32":2,"37":1,"47":1,"48":1,"53":1,"57":1,"88":1,"90":1,"93":3,"96":1,"106":1,"107":2,"108":1,"181":10,"183":1}}],["juliauct",{"2":{"179":1}}],["juliaalign",{"2":{"183":1}}],["juliaalltagfilter",{"2":{"183":1}}],["juliaalternative",{"2":{"181":1}}],["juliaadvancedretriever",{"2":{"183":1}}],["juliaadvancedgenerator",{"2":{"183":1}}],["juliaadd",{"2":{"179":1,"183":1}}],["juliaabstractretriever",{"2":{"183":1}}],["juliaabstractmultiindex",{"2":{"183":1}}],["juliaabstractindexbuilder",{"2":{"183":1}}],["juliaabstractgenerator",{"2":{"183":1}}],["juliaabstractchunkindex",{"2":{"183":1}}],["juliaabstractcandidatechunks",{"2":{"183":1}}],["juliaabstracttool",{"2":{"181":1}}],["juliaa=1",{"2":{"181":1}}],["juliaaai",{"2":{"181":1}}],["juliaauth",{"2":{"181":1}}],["juliaa",{"2":{"181":2,"183":1}}],["juliaapi",{"2":{"181":2}}],["juliaanswer",{"2":{"183":1}}],["juliaanytagfilter",{"2":{"183":1}}],["juliaanthropic",{"2":{"181":2}}],["juliaanthropicschema",{"2":{"181":1}}],["juliaannotatednode",{"2":{"183":1}}],["juliaannotater",{"2":{"67":1,"183":1}}],["juliaannotate",{"2":{"67":2,"183":2}}],["juliaagenttools",{"2":{"179":1}}],["juliaassume",{"2":{"67":1,"183":1}}],["juliaaitools",{"2":{"181":3}}],["juliaaitemplate",{"2":{"181":1}}],["juliaaitemplates",{"2":{"89":1,"181":2}}],["juliaaiimage",{"2":{"181":2}}],["juliaaimessage",{"2":{"181":1}}],["juliaaiscan",{"2":{"179":1,"181":3}}],["juliaaiextract",{"2":{"179":1,"181":3}}],["juliaaiembed",{"2":{"30":1,"31":1,"179":1,"181":3}}],["juliaairag",{"2":{"67":1,"183":1}}],["juliaairetry",{"2":{"52":1,"179":1}}],["juliaaicodefixer",{"2":{"52":1,"179":2}}],["juliaaicode",{"2":{"52":1,"181":1}}],["juliaaicall",{"2":{"52":3,"179":6}}],["juliaaiclassify",{"2":{"17":2,"179":1,"181":5}}],["juliaaigenerate",{"2":{"52":1,"89":2,"179":1,"181":8}}],["juliaai",{"2":{"34":1,"87":1,"97":2,"181":1}}],["juliaload",{"2":{"181":2,"183":1}}],["julialocalserveropenaischema",{"2":{"181":1}}],["juliallmleaderboard",{"2":{"91":1}}],["julialength",{"2":{"58":1,"181":1}}],["julialanguage",{"2":{"114":1}}],["julialang",{"2":{"57":1,"58":1,"60":1,"181":1}}],["juliart",{"2":{"183":1}}],["juliarank",{"2":{"183":2}}],["juliarankgptresult",{"2":{"183":1}}],["juliarankgptreranker",{"2":{"183":1}}],["juliaragresult",{"2":{"183":1}}],["juliaragconfig",{"2":{"183":1}}],["juliaragtools",{"2":{"183":1}}],["juliarun",{"2":{"179":2,"183":2}}],["juliarerank",{"2":{"183":2}}],["juliarefiner",{"2":{"183":1}}],["juliarefine",{"2":{"183":3}}],["juliarender",{"2":{"181":9}}],["juliarendered",{"2":{"107":1}}],["juliaremove",{"2":{"181":1}}],["juliaregister",{"2":{"181":2}}],["juliaretryconfig",{"2":{"179":1}}],["juliaretrieve",{"2":{"67":1,"183":1}}],["juliaretriever",{"2":{"65":1,"67":2,"183":2}}],["juliareciprocal",{"2":{"183":2}}],["juliareceive",{"2":{"183":1}}],["juliarecaptask",{"0":{"172":1}}],["juliarecapcottask",{"0":{"171":1}}],["juliarecursive",{"2":{"58":2,"181":2}}],["juliaresponse",{"2":{"181":1}}],["juliaresize",{"2":{"181":2}}],["juliares",{"2":{"67":1,"183":1}}],["juliaresult",{"2":{"61":1,"106":1,"108":1,"181":4,"183":1}}],["juliaresults",{"2":{"7":1}}],["juliarephrase",{"2":{"65":1,"183":3}}],["juliareplace",{"2":{"58":1,"181":1}}],["juliar",{"2":{"55":2,"180":2}}],["juliaflashranker",{"2":{"183":1}}],["juliafind",{"2":{"181":2,"183":8}}],["juliafinalize",{"2":{"181":3}}],["juliafields",{"2":{"181":4}}],["juliafireworksopenaischema",{"2":{"181":1}}],["juliafilechunker",{"2":{"183":1}}],["juliafilename",{
"2":{"24":1}}],["juliafiles",{"2":{"2":1}}],["juliafeedback",{"2":{"108":1}}],["juliafor",{"2":{"52":1,"179":1}}],["juliasplit",{"2":{"183":1}}],["juliaspec",{"2":{"181":1}}],["juliascore",{"2":{"183":1}}],["juliaschema",{"2":{"42":1}}],["juliasubchunkindex",{"2":{"183":1}}],["juliastemmer",{"2":{"183":1}}],["juliastyler",{"2":{"183":1}}],["juliastreamed",{"2":{"181":1}}],["juliastreamchunk",{"2":{"181":1}}],["juliastreamcallback",{"2":{"181":1}}],["juliasimpleretriever",{"2":{"183":1}}],["juliasimplerephraser",{"2":{"183":1}}],["juliasimplerefiner",{"2":{"183":1}}],["juliasimpleindexer",{"2":{"183":1}}],["juliasimplegenerator",{"2":{"183":1}}],["juliasimplebm25retriever",{"2":{"183":1}}],["juliasimpleanswerer",{"2":{"183":1}}],["juliasig",{"2":{"108":3}}],["juliasharegptschema",{"2":{"181":1}}],["juliasave",{"2":{"181":3}}],["juliasaverschema",{"2":{"181":1}}],["juliasample",{"2":{"179":1}}],["juliasamplenode",{"2":{"179":1}}],["juliasetpropertynested",{"2":{"183":1}}],["juliaset",{"2":{"181":2,"183":1}}],["juliaselect",{"2":{"179":1}}],["juliasentences",{"2":{"61":1}}],["juliaserialize",{"2":{"2":1}}],["juliamultiindex",{"2":{"183":1}}],["juliamultifinder",{"2":{"183":1}}],["juliamulticandidatechunks",{"2":{"183":1}}],["juliamerge",{"2":{"183":1}}],["juliamessages",{"2":{"181":1}}],["juliameta",{"2":{"93":1}}],["juliamarkdown",{"2":{"181":1}}],["juliamistralopenaischema",{"2":{"181":1}}],["juliamodelspec",{"2":{"181":1}}],["juliamodel",{"2":{"40":1,"108":2,"181":2}}],["juliamsgs",{"2":{"24":1}}],["juliamsg",{"2":{"12":1,"13":2,"20":1,"22":1,"23":1,"24":1,"26":1,"28":1,"29":1,"30":1,"31":1,"34":1,"39":1,"43":1,"45":2,"46":1,"47":1,"52":1,"67":1,"98":1,"181":14,"183":3}}],["juliadistance",{"2":{"181":1}}],["juliadetect",{"2":{"181":1}}],["juliadecode",{"2":{"181":1}}],["juliadeepseekopenaischema",{"2":{"181":1}}],["juliadatabricksopenaischema",{"2":{"181":1}}],["juliadatamessage",{"2":{"181":1}}],["juliadataexpertask",{"2":{"24":2}}],["juliadry",{"2":{"92":1}}],["juliadocumenttermmatrix",{"2":{"183":1}}],["juliadoc",{"2":{"67":1,"183":1}}],["juliadocs",{"2":{"46":1}}],["juliadf",{"2":{"7":1}}],["juliapositions1",{"2":{"183":2}}],["juliapermutation",{"2":{"183":1}}],["juliapush",{"2":{"181":1}}],["juliapack",{"2":{"183":1}}],["juliapackage",{"2":{"114":1}}],["juliaparent",{"2":{"183":1}}],["juliaparse",{"2":{"181":1}}],["juliapassthroughtagger",{"2":{"183":1}}],["juliapprint",{"2":{"61":1,"181":2}}],["juliapreprocess",{"2":{"183":1}}],["juliapreferences",{"2":{"181":1}}],["juliaprompt",{"2":{"108":1}}],["juliapromptingtools",{"2":{"63":1,"181":5,"183":1}}],["juliaprompts",{"2":{"14":1}}],["juliaprint",{"2":{"52":1,"179":1,"181":3,"183":1}}],["juliapt",{"2":{"24":2,"42":1,"52":1,"67":1,"89":2,"92":1,"93":1,"181":5,"183":3}}],["juliacc",{"2":{"183":1}}],["juliachunkkeywordsindex",{"2":{"183":2}}],["juliachunkembeddingsindex",{"2":{"183":1}}],["juliachoices",{"2":{"18":1,"88":1,"181":5}}],["juliacandidatechunks",{"2":{"183":1}}],["juliacallback",{"2":{"181":1}}],["juliacall",{"2":{"181":1}}],["juliacustomopenaischema",{"2":{"181":1}}],["juliacerebrasopenaischema",{"2":{"181":1}}],["juliacfg",{"2":{"65":1,"67":1,"183":3}}],["juliacreate",{"2":{"55":1,"180":1,"181":1,"183":1}}],["juliacb",{"2":{"52":1,"179":1}}],["juliacohere",{"2":{"183":1}}],["juliacoherereranker",{"2":{"183":1}}],["juliacosinesimilarity",{"2":{"183":1}}],["juliacountry",{"2":{"97":1}}],["juliacommands",{"2":{"58":1,"181":1}}],["juliacode",{"2":{"52":2,"181":4}}],["juliaconfigure",{"2":{"181":1}}],["juliaconv",{"2":{"181":4}}],
["juliaconversation",{"2":{"35":1,"41":1,"87":1}}],["juliacontextenumerator",{"2":{"183":1}}],["juliacontext",{"2":{"58":1,"181":1}}],["juliaconst",{"2":{"15":1,"22":1,"23":1,"26":1,"181":15}}],["juliatags",{"2":{"183":1}}],["juliatavilysearchrefiner",{"2":{"183":1}}],["juliatavily",{"2":{"180":1}}],["juliatypeof",{"2":{"181":5}}],["juliatrigrams",{"2":{"183":1}}],["juliatrigram",{"2":{"183":1}}],["juliatrigramannotater",{"2":{"183":1}}],["juliatranslate",{"2":{"183":2}}],["juliatracerschema",{"2":{"181":1}}],["juliatracermessagelike",{"2":{"181":1}}],["juliatracermessage",{"2":{"181":1}}],["juliatryparse",{"2":{"181":1}}],["juliatruncate",{"2":{"179":1}}],["juliatokenize",{"2":{"183":1}}],["juliatoken",{"2":{"183":1}}],["juliatool",{"2":{"181":6}}],["juliatogetheropenaischema",{"2":{"181":1}}],["juliathompsonsampling",{"2":{"179":1}}],["juliatemplate",{"2":{"107":1}}],["juliatextchunker",{"2":{"183":1}}],["juliatext1",{"2":{"58":1,"181":1}}],["juliatext",{"2":{"16":1,"58":7,"181":7}}],["juliatpl",{"2":{"24":1}}],["juliatmps",{"2":{"13":2,"24":1,"181":4}}],["juliajulia>",{"2":{"13":1,"52":1,"179":1,"181":1}}],["juliaexperimental",{"2":{"182":1}}],["juliaexperttask",{"2":{"181":1}}],["juliaexperttestcodexml",{"0":{"178":1}}],["juliaexperttestcode",{"0":{"170":1}}],["juliaexperttranscriptcritic",{"0":{"142":1}}],["juliaexpertcottaskxml",{"0":{"177":1}}],["juliaexpertcottask",{"0":{"169":1}}],["juliaexpertaskxml",{"0":{"176":1}}],["juliaexpertask",{"0":{"168":1},"2":{"13":4,"24":6,"52":1,"106":2,"179":1,"181":6}}],["juliaextract",{"2":{"181":8,"183":1}}],["juliaexecute",{"2":{"181":1}}],["juliaencode",{"2":{"181":1}}],["juliaenv",{"2":{"79":1}}],["juliaeval",{"2":{"181":1}}],["juliaevaluate",{"2":{"179":1}}],["juliaevals",{"2":{"4":1,"5":1}}],["juliaerror",{"2":{"52":1,"179":1}}],["juliax",{"2":{"6":1,"181":1,"183":1}}],["julia>",{"2":{"5":1}}],["julia",{"2":{"2":3,"4":1,"6":1,"7":1,"10":1,"13":6,"19":2,"20":3,"21":2,"23":1,"24":14,"27":1,"29":1,"31":1,"43":1,"49":1,"51":2,"52":9,"58":1,"59":1,"60":1,"61":26,"67":3,"73":6,"74":1,"78":2,"79":3,"82":1,"88":2,"89":1,"92":1,"95":3,"104":1,"105":1,"106":1,"107":2,"108":2,"114":2,"124":4,"130":4,"131":2,"132":1,"142":8,"167":3,"168":2,"169":2,"170":3,"171":7,"172":8,"176":2,"177":2,"178":3,"179":12,"181":72,"183":17}}],["jls",{"2":{"2":2}}],["jl",{"0":{"80":1},"2":{"0":3,"1":1,"2":3,"8":2,"10":1,"23":3,"24":1,"26":2,"27":1,"29":1,"30":1,"31":1,"47":1,"52":1,"56":1,"58":3,"61":3,"66":1,"67":1,"70":1,"79":2,"82":2,"83":1,"86":3,"91":1,"95":2,"96":1,"99":1,"102":1,"106":1,"107":1,"108":1,"114":4,"179":1,"181":20,"183":5}}],["22",{"2":{"179":1}}],["2277",{"2":{"140":1}}],["26078",{"2":{"179":3}}],["267",{"2":{"128":1}}],["29",{"2":{"181":2}}],["29826",{"2":{"179":3}}],["2900",{"2":{"58":2,"181":2}}],["21",{"2":{"181":1}}],["2190",{"2":{"156":1}}],["210",{"2":{"132":1}}],["278",{"2":{"115":1}}],["2733",{"2":{"52":4,"179":4}}],["256",{"2":{"183":2}}],["2500",{"2":{"181":7}}],["25",{"2":{"67":3,"183":4}}],["25px",{"2":{"58":1,"181":6,"183":1}}],["248",{"2":{"176":1}}],["2487",{"2":{"130":1}}],["24",{"2":{"36":1}}],["24622",{"2":{"20":1,"181":2}}],["239",{"2":{"153":1}}],["23",{"2":{"52":1,"179":1,"181":1}}],["23rd",{"2":{"31":1}}],["237",{"2":{"13":1,"24":1,"168":1,"181":2}}],["2s",{"2":{"21":1,"51":1,"52":2,"179":2}}],["2064",{"2":{"142":1}}],["2000",{"2":{"170":1,"178":1}}],["200",{"2":{"77":1,"181":6}}],["20506",{"2":{"52":1,"179":1}}],["20737",{"2":{"52":4,"179":4}}],["2049",{"2":{"155":1}}],["20493",{"2":{"52":2,"179":2}}],["2048",{"2":{"28":1,"181":5
}}],["2021",{"2":{"114":2}}],["2020",{"2":{"98":1}}],["20240307",{"2":{"181":1}}],["2024",{"2":{"31":1,"78":1,"181":1}}],["2023",{"2":{"15":1,"61":1,"181":12}}],["20",{"2":{"7":6,"18":1,"58":2,"124":1,"125":1,"181":8,"183":3}}],["2examples",{"2":{"7":1}}],["2",{"0":{"159":1},"1":{"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"171":1,"172":1,"173":1,"174":1,"175":1,"176":1,"177":1,"178":1},"2":{"0":1,"7":3,"11":1,"13":1,"19":1,"20":3,"21":5,"24":3,"29":3,"46":1,"47":2,"51":5,"52":19,"55":1,"58":7,"61":1,"67":2,"73":1,"74":1,"77":1,"88":1,"89":1,"90":2,"108":2,"112":1,"114":2,"115":1,"120":1,"121":1,"130":3,"131":1,"136":1,"138":1,"157":1,"161":1,"163":2,"167":1,"169":1,"170":2,"171":3,"172":3,"178":2,"179":33,"180":1,"181":34,"183":14}}],["1`",{"2":{"181":1}}],["1+1",{"2":{"181":1}}],["16",{"2":{"181":2}}],["1643",{"2":{"178":1}}],["16k",{"2":{"58":1,"179":1,"181":1}}],["17",{"2":{"181":2}}],["175b",{"2":{"181":3}}],["1712",{"2":{"174":1}}],["172",{"2":{"162":1}}],["1>",{"2":{"155":1}}],["184",{"2":{"160":1}}],["18",{"2":{"149":1,"179":1,"181":1}}],["180",{"2":{"19":1,"181":4}}],["180cm",{"2":{"19":1,"181":7}}],["150",{"2":{"163":1}}],["1501",{"2":{"163":1}}],["1506",{"2":{"157":1}}],["1515",{"2":{"141":1}}],["151",{"2":{"137":1}}],["1536×2",{"2":{"181":1}}],["1536",{"2":{"16":1,"181":1}}],["1=worst",{"2":{"122":1}}],["13184",{"2":{"179":2}}],["1396",{"2":{"120":1}}],["1392",{"2":{"118":1}}],["1384",{"2":{"114":1}}],["1m",{"2":{"98":1}}],["1em",{"2":{"58":1,"181":6,"183":1}}],["1examples",{"2":{"7":1}}],["1px",{"2":{"58":1,"181":6,"183":1}}],["1475",{"2":{"170":1}}],["1415",{"2":{"121":1}}],["14966",{"2":{"52":4,"179":4}}],["14",{"2":{"52":1,"179":1}}],["128",{"2":{"183":2}}],["124",{"2":{"181":1}}],["127",{"2":{"84":1,"181":5}}],["12940",{"2":{"52":1,"179":1}}],["12",{"2":{"52":2,"74":1,"179":2,"181":2,"183":1}}],["123",{"2":{"24":1,"85":1}}],["120",{"2":{"10":2,"74":1,"106":2,"181":12}}],["11",{"2":{"183":1}}],["111",{"2":{"181":2}}],["11434",{"2":{"84":1,"181":2}}],["1143",{"2":{"58":1,"171":1,"172":1,"181":1}}],["114",{"2":{"23":1,"26":1}}],["1141",{"2":{"20":1,"181":2}}],["1106",{"2":{"15":2}}],["1928",{"2":{"120":3}}],["190",{"2":{"19":2,"181":6}}],["19",{"2":{"19":2,"61":1,"181":6}}],["10897",{"2":{"181":5}}],["10`",{"2":{"171":1,"172":1}}],["1073",{"2":{"126":1}}],["1074",{"2":{"117":1}}],["10examples",{"2":{"7":1}}],["10×8",{"2":{"7":1}}],["100k",{"2":{"183":1}}],["1000",{"2":{"77":1,"88":1,"152":1,"170":1,"178":1,"181":4,"183":3}}],["100x",{"2":{"74":1}}],["100",{"2":{"7":3,"52":4,"65":2,"67":5,"163":1,"179":3,"181":17,"183":15}}],["10",{"2":{"6":1,"7":4,"11":1,"52":8,"58":1,"61":1,"67":1,"73":2,"74":2,"108":1,"126":1,"179":6,"181":18,"183":11}}],["1024x1024",{"2":{"181":2}}],["1024",{"2":{"181":2}}],["102",{"2":{"4":1,"11":1}}],["1",{"0":{"158":1},"2":{"0":1,"5":1,"6":1,"7":13,"10":2,"13":3,"16":2,"18":1,"21":6,"24":4,"28":1,"30":1,"32":1,"33":1,"37":2,"40":1,"45":1,"47":1,"51":6,"52":48,"57":2,"58":9,"61":6,"67":9,"73":1,"77":1,"84":1,"87":1,"89":4,"90":2,"93":1,"97":1,"98":2,"106":3,"107":1,"108":3,"110":1,"112":2,"114":4,"115":1,"117":2,"118":2,"120":5,"121":17,"122":3,"124":1,"125":1,"126":1,"127":1,"128":1,"130":3,"131":3,"132":1,"134":1,"136":3,"137":2,"138":2,"140":1,"141":1,"142":1,"144":1,"145":1,"147":2,"149":2,"150":1,"152":1,"153":1,"155":5,"156":4,"157":3,"160":1,"161":2,"162":2,"163":2,"164":1,"165":1,"166":1,"167":1,"168":1,"170":3,"171":6,"172":5,"173":1,"174":1,"176":1,"177":1,"178":2,"179":63,"181":89,"183":47}}],["nfeedb
ack",{"2":{"179":6}}],["n```",{"2":{"130":1,"131":1,"181":1}}],["nwhat",{"2":{"107":1}}],["nwe",{"2":{"7":1}}],["n=5",{"2":{"67":1,"183":1}}],["n=2",{"2":{"21":1,"51":1,"52":1,"179":1}}],["nparagraph",{"2":{"58":6,"90":2,"181":6}}],["nsfw",{"2":{"52":1,"179":1}}],["nsemijoin",{"2":{"7":1}}],["nbsp",{"2":{"52":9,"55":1,"58":5,"67":6,"179":37,"180":2,"181":184,"182":1,"183":142}}],["ngl",{"2":{"28":2}}],["nli",{"2":{"17":1}}],["nt2",{"2":{"183":3}}],["nt1",{"2":{"183":3}}],["nt",{"2":{"183":4}}],["nthreads",{"2":{"183":6}}],["nthe",{"2":{"7":1}}],["ntasks=2",{"2":{"74":2}}],["ntasks=1",{"2":{"67":1,"74":1,"183":4}}],["ntasks=10",{"2":{"14":1}}],["ntasks",{"2":{"67":1,"74":1,"183":7}}],["n7",{"2":{"7":1}}],["n6",{"2":{"7":1}}],["n5",{"2":{"7":1}}],["numerical",{"2":{"61":1}}],["num",{"2":{"37":2,"112":4,"179":6,"183":2}}],["number",{"2":{"14":1,"28":1,"52":14,"55":1,"57":1,"58":2,"61":1,"67":8,"74":4,"88":10,"108":1,"112":1,"138":1,"170":1,"178":1,"179":26,"180":1,"181":25,"183":19}}],["numbers",{"2":{"2":1,"52":4,"170":2,"178":2,"179":1,"181":3,"183":1}}],["null",{"2":{"7":1,"181":3}}],["n4",{"2":{"7":1}}],["n3",{"2":{"7":1}}],["n2",{"2":{"7":1}}],["n2×3",{"2":{"7":1}}],["n2×2",{"2":{"7":2}}],["n1",{"2":{"7":1}}],["njob",{"2":{"7":2}}],["njulia",{"2":{"7":1}}],["niche",{"2":{"126":1}}],["nice",{"2":{"13":1,"24":1,"39":1,"40":1,"42":1,"181":4}}],["nid",{"2":{"7":2}}],["nintroduction",{"2":{"7":1}}],["n─────┼───────────────",{"2":{"7":1}}],["n─────┼─────────────────────────",{"2":{"7":1}}],["n─────┼─────────────────",{"2":{"7":1}}],["naming",{"2":{"181":1}}],["name`",{"2":{"181":1}}],["named",{"2":{"170":1,"178":1,"181":19}}],["namedtuple=namedtuple",{"2":{"183":1}}],["namedtuples",{"2":{"171":1,"172":1,"183":1}}],["namedtuple",{"2":{"10":2,"52":7,"67":61,"106":2,"179":9,"180":2,"181":84,"183":79}}],["namespace",{"2":{"96":1}}],["names",{"2":{"7":3,"27":1,"28":1,"58":1,"61":1,"63":1,"65":1,"108":2,"114":5,"167":1,"171":1,"172":1,"181":27,"183":1}}],["name",{"2":{"7":6,"11":1,"12":2,"13":2,"15":1,"24":6,"28":1,"29":1,"30":1,"31":2,"37":1,"40":3,"42":1,"84":1,"85":1,"86":1,"87":5,"89":7,"91":1,"92":3,"105":2,"108":9,"114":1,"130":2,"161":1,"163":1,"170":1,"178":1,"179":1,"181":118,"183":1}}],["name=",{"2":{"7":1,"12":1,"85":1,"86":1,"89":2,"92":1,"93":1,"171":1,"172":1,"181":4}}],["narrative",{"2":{"174":2}}],["nature",{"2":{"156":1,"181":1}}],["naturally",{"2":{"181":1}}],["natural",{"2":{"61":1,"126":1,"128":1,"181":1}}],["native",{"2":{"78":1,"108":1}}],["navigate",{"2":{"1":1}}],["n",{"2":{"7":17,"13":2,"21":2,"24":8,"51":2,"52":7,"58":24,"65":2,"67":8,"90":10,"105":2,"107":4,"108":4,"130":1,"131":1,"179":18,"181":30,"183":18}}],["neighboring",{"2":{"183":1}}],["network",{"2":{"183":3}}],["nedeed",{"2":{"181":1}}],["never",{"2":{"181":1,"183":1}}],["negative",{"2":{"170":1,"174":1,"178":1,"183":1}}],["nesting",{"2":{"170":1,"178":1}}],["nested",{"2":{"0":1,"65":2,"67":5,"170":1,"178":1,"181":3,"183":16}}],["neuroplasticity",{"2":{"114":2}}],["nexample",{"2":{"108":1}}],["next",{"0":{"8":1},"2":{"21":1,"51":1,"52":1,"87":1,"107":2,"156":14,"163":1,"179":2,"181":2,"183":3}}],["nearest",{"2":{"89":2,"181":2}}],["near",{"2":{"59":1}}],["news",{"2":{"181":2}}],["newline",{"2":{"58":3,"181":4,"183":2}}],["newlines",{"2":{"57":1,"58":2,"90":1,"181":3}}],["new",{"0":{"78":1},"2":{"12":2,"24":2,"31":1,"52":8,"63":3,"67":2,"72":1,"73":1,"74":1,"78":3,"84":1,"87":1,"95":1,"97":1,"105":1,"107":1,"117":2,"118":1,"130":1,"131":1,"155":1,"156":1,"171":1,"172":1,"179":8,"181":32,"183":5}}],["necessary>",{"2"
:{"163":1}}],["necessary",{"2":{"10":1,"23":1,"27":1,"41":1,"49":1,"52":2,"59":1,"66":1,"86":1,"92":1,"93":1,"99":1,"106":1,"142":1,"155":1,"179":1,"181":18}}],["needing",{"2":{"120":1}}],["needs",{"2":{"52":1,"102":1,"108":1,"130":1,"142":1,"179":1,"181":1}}],["needed>",{"2":{"163":1}}],["needed",{"2":{"10":1,"19":1,"21":1,"49":1,"51":1,"67":1,"88":1,"106":1,"117":2,"118":2,"140":1,"142":1,"174":1,"179":2,"181":5,"183":1}}],["need",{"2":{"3":1,"4":2,"5":1,"7":1,"10":2,"11":2,"28":1,"32":1,"37":1,"39":1,"42":1,"51":1,"52":1,"54":1,"57":1,"58":1,"60":1,"61":3,"65":1,"67":1,"74":1,"75":1,"77":1,"79":1,"85":1,"86":2,"88":2,"89":1,"95":2,"103":2,"106":2,"108":4,"130":1,"131":1,"132":1,"155":1,"156":1,"157":1,"169":1,"170":1,"172":1,"174":1,"177":1,"178":1,"179":1,"181":28,"183":3}}],["noprocessor",{"2":{"181":1,"183":4}}],["nopostprocessor",{"2":{"67":2,"181":1,"183":6}}],["noembedder",{"2":{"181":1,"183":3}}],["noisy",{"2":{"118":1}}],["noise",{"2":{"2":1}}],["noschema",{"2":{"92":3,"107":1,"181":3}}],["noreranker",{"2":{"181":1,"183":4}}],["norephraser",{"2":{"181":1,"183":5}}],["norefiner",{"2":{"67":3,"181":1,"183":7}}],["normal",{"2":{"78":1,"181":9}}],["normalization",{"2":{"47":1}}],["normalizes",{"2":{"183":1}}],["normalized",{"2":{"16":1,"57":1,"58":1,"181":3,"183":1}}],["normalize",{"2":{"16":2,"47":2,"58":2,"181":8,"183":3}}],["norm",{"2":{"58":2,"181":2}}],["nodes",{"2":{"52":1,"67":5,"179":4,"183":15}}],["node",{"2":{"52":5,"67":3,"179":31,"181":4,"183":38}}],["nods",{"2":{"41":2}}],["nomic",{"2":{"31":2}}],["now",{"2":{"23":1,"24":1,"26":1,"32":1,"41":1,"52":2,"61":1,"77":1,"82":1,"88":1,"89":1,"91":1,"108":1,"179":3,"181":8,"183":1}}],["non",{"2":{"11":1,"21":1,"51":1,"52":1,"85":1,"179":1,"181":15,"183":2}}],["none",{"2":{"4":1,"21":1,"51":1,"67":2,"114":1,"115":1,"120":1,"138":2,"155":1,"156":1,"157":1,"170":1,"172":1,"174":1,"177":1,"178":1,"183":3}}],["no",{"2":{"10":1,"15":1,"20":1,"21":1,"51":1,"52":5,"61":3,"67":3,"78":2,"106":1,"107":2,"130":1,"132":1,"138":2,"156":1,"163":1,"179":6,"181":45,"183":20}}],["notfound",{"2":{"179":1}}],["notagfilter",{"2":{"181":1,"183":6}}],["notagger",{"2":{"67":2,"181":1,"183":10}}],["notation",{"2":{"52":1,"179":1}}],["notification",{"2":{"76":1}}],["notion",{"2":{"49":1}}],["notice",{"2":{"21":3,"22":1,"34":1,"51":2,"52":2,"61":1,"63":1,"65":2,"87":2,"88":1,"91":1,"97":1,"105":1,"107":1,"108":1,"179":2,"181":6,"183":1}}],["nothing",{"2":{"6":1,"7":1,"12":1,"19":4,"31":1,"52":19,"67":9,"73":1,"88":1,"92":1,"108":4,"117":2,"118":2,"155":1,"161":1,"179":24,"181":218,"183":55}}],["not",{"0":{"78":1},"2":{"1":1,"5":1,"7":3,"10":3,"11":1,"12":2,"19":1,"22":1,"23":1,"24":2,"27":1,"32":1,"35":2,"36":2,"41":1,"42":2,"43":1,"49":1,"52":21,"55":1,"58":5,"63":1,"67":1,"69":1,"71":2,"72":2,"73":2,"75":2,"76":1,"78":2,"79":1,"84":1,"87":1,"88":4,"90":1,"95":3,"99":1,"106":3,"108":9,"112":1,"117":1,"118":4,"120":1,"121":3,"126":1,"128":1,"130":2,"141":1,"152":1,"155":5,"156":1,"157":1,"169":1,"170":1,"171":2,"172":2,"174":2,"178":1,"179":23,"180":1,"181":98,"182":2,"183":11}}],["notes",{"2":{"52":2,"58":2,"67":4,"155":4,"156":1,"165":6,"167":7,"179":5,"181":7,"183":8}}],["notexist",{"2":{"21":1,"51":1,"52":2,"179":2}}],["noteworthy",{"2":{"10":1,"67":1,"183":1}}],["note",{"2":{"0":2,"1":2,"6":1,"7":4,"19":1,"21":1,"23":1,"24":1,"27":1,"30":1,"31":1,"37":1,"42":2,"51":1,"52":3,"66":1,"67":1,"69":1,"75":1,"78":1,"89":1,"105":1,"107":1,"108":1,"163":1,"179":5,"181":38,"182":1,"183":3}}],["pwd",{"2":{"181":2}}],["pct",{"2":{"174":3}}],["phrase",{"2":{"161"
:1}}],["phrasings",{"2":{"126":1}}],["photos",{"2":{"153":1}}],["phase",{"2":{"66":3,"67":1,"183":1}}],["phases",{"2":{"63":1}}],["python",{"2":{"43":1,"61":1,"171":1,"172":1,"181":2}}],["pkgdir",{"2":{"24":1}}],["pkg",{"2":{"24":2,"32":2,"52":1,"73":1,"96":2,"181":2}}],["png",{"2":{"20":2,"43":2,"181":10}}],["p",{"2":{"18":2,"88":4,"179":2,"181":5}}],["plots",{"2":{"114":2}}],["plural",{"2":{"91":1}}],["plus",{"2":{"17":2,"88":2,"181":4}}],["please",{"2":{"24":1,"73":1,"75":2,"121":1,"152":1,"179":1,"181":1}}],["plausible",{"2":{"174":2}}],["plain",{"2":{"163":2,"183":1}}],["plaintextexplain",{"2":{"174":1}}],["plaintextextract",{"2":{"115":1}}],["plaintextnotes",{"2":{"165":1,"167":1}}],["plaintextblog",{"2":{"152":1}}],["plaintextuser",{"2":{"136":1,"138":1,"163":1}}],["plaintextusing",{"2":{"24":1}}],["plaintextoriginal",{"2":{"127":1}}],["plaintexthere",{"2":{"126":1,"128":1}}],["plaintextquery",{"2":{"125":1}}],["plaintextwrite",{"2":{"124":1}}],["plaintextwe",{"2":{"117":1,"118":1}}],["plaintextignore",{"2":{"130":1}}],["plaintexti",{"2":{"112":1}}],["plaintextyour",{"2":{"152":1}}],["plaintextyou",{"2":{"112":1,"114":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"136":1,"137":1,"138":1,"144":1,"145":1,"147":1,"150":1,"153":1,"160":1,"162":1,"164":1,"166":1,"168":1,"169":1,"170":1,"171":1,"172":1,"173":1,"174":1,"176":1,"177":1,"178":1}}],["plaintextact",{"2":{"110":1,"117":1,"118":1,"140":1,"141":1,"142":1,"155":1,"156":1,"161":1,"163":1,"165":1,"167":1}}],["plaintextaimessage",{"2":{"12":1}}],["plaintext2",{"2":{"92":3,"107":2}}],["plaintext>",{"2":{"12":1}}],["plaintext",{"2":{"11":1,"61":1,"97":2,"98":1,"110":1,"114":1,"115":1,"120":1,"121":1,"122":1,"130":1,"131":2,"132":2,"134":2,"137":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"149":2,"150":1,"153":1,"155":1,"156":1,"157":1,"159":1,"160":1,"161":1,"162":1,"164":1,"166":1,"168":1,"169":1,"170":1,"171":1,"172":1,"173":1,"176":1,"177":1,"178":1}}],["placing",{"2":{"130":1,"131":1}}],["places",{"2":{"108":1,"114":1,"181":2}}],["placeholder",{"2":{"24":1,"67":1,"89":1,"105":1,"107":1,"137":1,"144":1,"145":1,"147":1,"150":1,"181":6,"183":1}}],["placeholders",{"0":{"98":1},"2":{"13":2,"24":4,"100":1,"105":1,"107":4,"108":2,"110":2,"112":2,"114":2,"115":2,"117":2,"118":2,"120":2,"121":2,"122":2,"124":2,"125":2,"126":2,"127":2,"128":2,"130":2,"131":2,"132":2,"134":2,"136":2,"137":1,"138":2,"140":2,"141":2,"142":2,"144":1,"145":1,"147":1,"149":2,"150":1,"152":2,"153":2,"155":2,"156":2,"157":2,"160":2,"161":2,"162":2,"163":2,"164":2,"165":2,"166":2,"167":2,"168":2,"169":2,"170":2,"171":2,"172":2,"173":2,"174":3,"176":2,"177":2,"178":2,"181":8}}],["place",{"2":{"21":1,"52":1,"63":1,"102":1,"156":1,"179":2,"181":4}}],["platform",{"2":{"75":1,"181":2}}],["plant",{"2":{"18":2,"88":1,"181":8}}],["plan",{"2":{"16":1,"23":1,"75":1,"156":1,"181":2}}],["plays",{"2":{"156":1}}],["playing",{"2":{"155":1}}],["playful",{"2":{"152":1}}],["play",{"2":{"8":1,"21":1,"51":1,"52":2,"179":2}}],["pprint",{"2":{"10":1,"60":2,"61":1,"67":4,"181":11,"183":12}}],["pesona",{"2":{"165":1}}],["penicillin",{"2":{"120":2}}],["perched",{"2":{"181":1}}],["permanently",{"2":{"181":1}}],["permanent",{"2":{"181":1}}],["permutation",{"2":{"181":7,"183":12}}],["persistent",{"2":{"181":1}}],["persistently",{"2":{"85":1}}],["persist",{"2":{"181":1}}],["persists",{"2":{"80":1}}],["personality",{"2":{"181":1}}],["personally",{"2":{"92":1}}],["personal",{"2":{"84":1,"181":2}}],["persona",{"0":{"154":1},"1":{"155":1,"156":1,"157":1},"2":{"24":1,"149":1,
"150":1,"164":1,"165":4,"167":1,"181":5}}],["personas",{"2":{"24":1}}],["person",{"2":{"7":2,"19":1,"181":5}}],["periods",{"2":{"183":1}}],["period",{"2":{"74":1}}],["per",{"2":{"67":1,"74":5,"77":3,"108":4,"114":1,"179":1,"181":9,"183":4}}],["perfectly",{"2":{"121":1}}],["perfect",{"2":{"58":1,"108":1,"124":1,"181":3}}],["performance",{"2":{"14":1,"61":2,"127":1,"142":1,"171":1,"172":1,"181":1,"183":2}}],["perform",{"2":{"7":1}}],["perhaps",{"2":{"41":1}}],["perplexity",{"2":{"23":1,"27":1}}],["people",{"2":{"7":4,"98":1,"155":1,"181":2}}],["punctuation",{"2":{"183":2}}],["push",{"2":{"181":6}}],["puppy",{"2":{"161":1}}],["pure",{"2":{"126":1}}],["purposes",{"2":{"92":1,"107":1}}],["purpose",{"2":{"5":2,"6":1,"7":6,"10":2,"105":1,"106":2,"108":1,"140":2,"165":6,"167":6,"170":1,"178":1,"181":1}}],["published",{"2":{"61":1,"114":1}}],["pull",{"2":{"37":1,"84":2}}],["put",{"2":{"2":1,"52":1,"74":1,"108":1,"179":1,"181":1}}],["pipe",{"2":{"179":1,"181":1}}],["pipelines",{"2":{"52":1,"179":2,"182":1}}],["pipeline",{"2":{"6":1,"59":1,"61":2,"63":2,"65":1,"67":10,"92":3,"183":16}}],["pinpoint",{"2":{"156":1}}],["pinpointing",{"2":{"130":1}}],["pirate",{"2":{"89":4,"181":6}}],["piece",{"2":{"67":1,"183":3}}],["pieces",{"2":{"2":1,"66":1}}],["picking",{"2":{"144":1}}],["pick",{"2":{"8":1,"52":1,"102":1,"108":1,"136":1,"138":1,"179":1,"181":2}}],["picture",{"2":{"5":2,"6":1,"7":2}}],["pounds",{"2":{"181":1}}],["port",{"2":{"181":5}}],["porsche",{"2":{"88":2,"108":2}}],["pop",{"2":{"179":1}}],["population=",{"2":{"98":1}}],["population",{"2":{"97":1,"98":4}}],["popular",{"2":{"83":1}}],["points",{"2":{"121":1,"130":4,"140":1,"141":1,"152":1,"155":9,"156":4,"163":1,"164":1,"166":1,"173":1,"179":1,"181":2,"183":1}}],["point",{"2":{"63":1,"67":1,"89":2,"108":1,"120":1,"130":1,"155":2,"156":1,"163":2,"181":2,"183":1}}],["pose",{"2":{"156":1}}],["positive",{"2":{"77":1,"163":1,"170":1,"174":1,"178":1}}],["positions1",{"2":{"183":3}}],["positions3",{"2":{"183":2}}],["positions2",{"2":{"183":5}}],["positions",{"2":{"181":10,"183":32}}],["position",{"2":{"58":1,"181":1,"183":9}}],["pos",{"2":{"58":4,"181":4,"183":1}}],["post",{"2":{"152":5,"167":2,"181":6,"183":1}}],["posts",{"2":{"140":1,"167":1}}],["postorderdfs",{"2":{"52":3,"179":8}}],["postprocessor",{"2":{"67":8,"183":11}}],["postprocessing",{"0":{"47":1},"2":{"47":1,"66":1,"67":1,"183":3}}],["postprocess",{"2":{"45":1,"64":1,"67":3,"181":7,"183":5}}],["possibly",{"2":{"183":1}}],["possible",{"2":{"7":1,"52":1,"58":4,"124":1,"125":2,"153":1,"155":1,"170":1,"178":1,"179":1,"181":6,"183":4}}],["possess",{"2":{"12":1}}],["possessions",{"2":{"12":1,"35":1}}],["powerful",{"2":{"15":1,"21":1,"52":1,"59":1,"91":1,"108":2,"126":1,"150":1,"179":1,"181":1,"183":1}}],["powered",{"2":{"14":1,"98":1}}],["power",{"2":{"12":1,"114":1}}],["poor",{"2":{"4":1}}],["potentially",{"2":{"66":1,"67":2,"181":2,"183":2}}],["potential",{"2":{"2":2,"52":1,"66":1,"127":1,"174":1,"179":2}}],["pt",{"2":{"1":1,"15":2,"22":2,"23":3,"24":6,"25":1,"26":2,"27":1,"28":1,"29":4,"30":2,"31":2,"32":1,"35":2,"37":5,"39":1,"41":2,"42":6,"47":1,"52":6,"67":10,"82":1,"85":5,"86":4,"89":2,"91":2,"92":4,"107":12,"108":3,"179":19,"181":77,"183":30}}],["palm",{"2":{"181":1}}],["packed",{"2":{"183":8}}],["pack",{"2":{"181":1,"183":9}}],["packages",{"2":{"13":2,"24":2,"52":1,"61":1,"114":1,"171":1,"172":1,"181":4,"182":1,"183":2}}],["package",{"2":{"1":1,"2":1,"10":1,"24":10,"32":1,"37":1,"52":1,"59":1,"73":1,"96":1,"99":2,"102":1,"106":1,"114":1,"179":1,"181":9,"183":5}}],["payout",{"2":{"174":1}}],
["payload",{"2":{"108":1}}],["paying",{"0":{"77":1},"2":{"77":1}}],["pay",{"2":{"74":1,"77":3}}],["painting",{"2":{"58":1,"181":1}}],["pair",{"0":{"5":1,"6":1},"2":{"181":5,"183":2}}],["pairs",{"0":{"4":1},"2":{"3":1,"6":1,"60":1,"67":3,"181":3,"183":3}}],["padding",{"2":{"58":1,"181":6,"183":1}}],["padawan",{"2":{"12":1,"35":1,"181":1}}],["pauses",{"2":{"41":1}}],["paper",{"2":{"21":2,"52":2,"125":1,"130":1,"179":2,"183":1}}],["page",{"2":{"8":1,"24":1,"32":1,"47":1,"71":1,"72":1,"95":1,"181":2}}],["pages",{"2":{"2":3,"11":1,"153":1}}],["paris",{"2":{"97":1,"183":3}}],["parents",{"2":{"179":1}}],["parent",{"2":{"67":1,"93":1,"179":4,"181":17,"183":41}}],["param2",{"2":{"183":1}}],["param1",{"2":{"183":1}}],["parameter",{"2":{"67":1,"107":1,"108":3,"179":1,"181":5,"183":5}}],["parameters",{"2":{"2":1,"6":2,"7":3,"10":2,"52":1,"67":16,"86":1,"106":2,"108":5,"179":3,"181":15,"183":29}}],["paragraphs",{"2":{"58":2,"90":1,"181":2}}],["paragraph",{"2":{"58":3,"90":1,"181":3,"183":2}}],["parallelism",{"2":{"61":1}}],["parallel",{"2":{"52":1,"61":7,"67":1,"179":1,"183":6}}],["paralellize",{"2":{"46":1}}],["parts",{"2":{"60":1,"141":1,"156":1,"179":1,"181":1}}],["particular",{"2":{"56":1,"67":1,"174":1,"181":1,"183":1}}],["particularly",{"2":{"52":1,"58":2,"144":1,"145":1,"147":1,"179":2,"181":4}}],["partially",{"2":{"181":1}}],["partial",{"2":{"24":2,"61":1,"92":2,"181":2,"183":2}}],["part",{"2":{"12":1,"52":1,"66":2,"67":2,"155":2,"179":1,"181":6,"183":2}}],["parseable",{"2":{"88":2}}],["parses",{"2":{"52":1,"181":1}}],["parser",{"2":{"52":1,"181":1}}],["parse",{"2":{"20":1,"52":2,"77":1,"179":1,"181":7}}],["parsed",{"2":{"8":1,"52":6,"108":1,"181":8}}],["parsing",{"2":{"7":1,"19":1,"52":6,"179":1,"181":8}}],["patience",{"2":{"41":1}}],["pathways",{"2":{"170":1,"178":1}}],["path=",{"2":{"20":1,"43":1,"181":5}}],["path",{"2":{"12":1,"35":2,"41":1,"43":2,"181":33}}],["paths",{"2":{"4":1,"67":4,"86":1,"181":2,"183":6}}],["patterns",{"2":{"181":3}}],["pattern",{"2":{"1":1,"11":1,"181":4}}],["past",{"2":{"87":2,"130":5,"179":2,"181":1}}],["paste",{"2":{"2":1}}],["passage",{"2":{"124":3,"125":3,"183":2}}],["passages",{"2":{"112":7,"183":1}}],["passtroughtagger",{"2":{"67":1,"183":1}}],["passthroughtagger",{"2":{"67":3,"181":1,"183":6}}],["passthrough",{"2":{"66":1,"181":1,"183":3}}],["pass",{"2":{"52":1,"63":1,"65":3,"67":4,"87":1,"179":1,"181":10,"183":22}}],["passes",{"2":{"21":1,"51":1,"183":4}}],["passed",{"2":{"6":1,"7":4,"52":2,"61":1,"63":2,"67":9,"88":1,"179":8,"181":8,"183":9}}],["passing",{"0":{"65":1},"2":{"0":1,"52":1,"179":1}}],["pragmatic",{"2":{"140":1,"141":1,"142":1}}],["practics",{"2":{"181":1}}],["practically",{"2":{"179":1}}],["practical",{"2":{"98":1,"108":1,"164":1,"166":1,"173":1,"181":1,"183":1}}],["practices",{"2":{"61":6,"67":1,"142":1,"183":6}}],["practice",{"2":{"4":1,"7":1,"107":1}}],["pristine",{"2":{"181":1}}],["primary",{"2":{"181":1}}],["pricing",{"2":{"77":1}}],["price",{"2":{"77":1}}],["privacy",{"0":{"71":1},"2":{"69":1}}],["principles",{"2":{"63":1}}],["printed",{"2":{"67":1,"181":2,"183":4}}],["printstyled",{"2":{"183":1}}],["prints",{"2":{"52":1,"67":1,"179":1,"181":2,"183":3}}],["println",{"2":{"52":5,"179":2,"181":7}}],["print",{"2":{"24":1,"49":3,"52":6,"58":1,"61":1,"67":4,"179":8,"181":37,"183":21}}],["printing",{"2":{"10":1,"60":1,"61":2,"181":1,"183":1}}],["priority",{"2":{"181":1}}],["prioritizing",{"2":{"144":1,"145":1,"147":1}}],["prioritize",{"2":{"114":1,"120":1,"156":1,"170":1,"171":1,"172":1,"178":1,"179":2}}],["prior",{"2":{"37":1,"181":1,"183":1}}]
,["pr",{"2":{"24":1}}],["prettify",{"2":{"183":1}}],["pretty",{"2":{"10":1,"52":1,"60":1,"61":2,"67":3,"92":1,"179":1,"181":5,"183":7}}],["pretend",{"2":{"181":1}}],["predicts",{"2":{"174":1}}],["prediction",{"2":{"174":6}}],["predictions",{"2":{"174":2}}],["prerequisites",{"0":{"95":1}}],["pre",{"2":{"92":1,"105":1,"181":2,"183":1}}],["preprocessor",{"2":{"183":2}}],["preprocessed",{"2":{"183":1}}],["preprocess",{"2":{"181":1,"183":4}}],["prepayment",{"2":{"95":1}}],["preparing",{"2":{"120":1}}],["prepare",{"2":{"64":1,"65":1,"67":1,"183":2}}],["prepared",{"2":{"52":1,"179":1}}],["preparation",{"2":{"63":1,"66":1,"183":11}}],["prepend",{"2":{"87":1}}],["prepended",{"2":{"52":1,"181":2}}],["prefill",{"2":{"181":3}}],["prefix",{"2":{"52":3,"78":2,"181":5}}],["prefences",{"2":{"181":1}}],["prefer",{"2":{"46":1,"58":2,"92":1,"161":1,"171":1,"172":1,"181":2}}],["preferencesfor",{"2":{"181":1}}],["preferences",{"0":{"80":1},"2":{"23":2,"26":2,"42":1,"73":1,"78":1,"80":3,"85":2,"181":36}}],["preference",{"2":{"0":1,"181":5}}],["preorderdfs",{"2":{"52":1,"179":3}}],["precedence",{"2":{"155":1,"159":1,"181":1}}],["preceding",{"2":{"2":1,"183":1}}],["precision",{"2":{"126":1,"153":1,"155":1}}],["precisely",{"2":{"155":2,"159":1,"163":1,"169":1}}],["precise",{"2":{"24":3,"105":1,"107":2,"150":1,"160":1,"164":1,"166":1,"168":1,"173":1,"176":1}}],["precompile",{"2":{"73":1}}],["precompiled",{"2":{"73":1}}],["precompilation",{"2":{"73":3}}],["present",{"2":{"126":1,"156":1,"157":1,"181":4}}],["preserve",{"2":{"58":1,"181":1,"183":1}}],["preserving",{"2":{"58":2,"181":2}}],["preset",{"2":{"0":1,"181":3}}],["press",{"2":{"24":2}}],["prev",{"2":{"183":1}}],["previously",{"2":{"58":1,"89":1,"90":1,"105":1,"130":1,"181":1,"183":3}}],["previous",{"2":{"24":1,"52":4,"87":1,"97":1,"103":2,"117":1,"118":1,"130":2,"155":1,"159":1,"179":5,"181":5}}],["previews",{"2":{"181":2}}],["preview",{"2":{"13":2,"15":2,"24":3,"78":2,"89":2,"179":3,"181":10}}],["prevent",{"2":{"11":1,"76":1,"181":1}}],["proposed",{"2":{"179":1}}],["propertynames",{"2":{"181":5}}],["property",{"2":{"181":6,"183":6}}],["properties",{"2":{"52":2,"93":1,"108":3,"179":1,"181":8,"183":1}}],["proper",{"2":{"52":1,"179":1,"181":1,"183":3}}],["properly",{"2":{"8":1}}],["professional",{"2":{"140":1,"163":1}}],["proficient",{"2":{"24":2}}],["project",{"2":{"89":2,"91":1,"181":1}}],["projects",{"2":{"80":1}}],["prototyping",{"2":{"89":1}}],["proxy",{"2":{"86":1,"179":1}}],["programming",{"2":{"61":2,"142":1,"167":1}}],["programmatically",{"2":{"52":1,"181":1}}],["programmer",{"2":{"13":1,"24":4,"104":1,"142":1,"166":1,"168":1,"169":1,"170":1,"171":1,"172":1,"176":1,"177":1,"178":1,"181":2}}],["program",{"2":{"52":2,"88":2,"179":2}}],["promising",{"2":{"52":1,"179":1}}],["prompting",{"2":{"181":1}}],["promptingtools",{"2":{"0":2,"1":4,"10":2,"12":1,"13":2,"15":2,"19":1,"21":1,"22":1,"23":4,"24":11,"25":3,"26":3,"27":1,"28":1,"29":1,"30":1,"31":1,"32":4,"37":3,"45":2,"46":1,"47":1,"48":2,"52":13,"53":1,"55":1,"56":1,"57":1,"58":7,"59":2,"61":1,"67":6,"73":5,"79":1,"80":3,"82":3,"83":1,"85":1,"87":4,"88":1,"89":5,"90":1,"91":1,"92":8,"93":3,"95":1,"96":4,"99":1,"106":1,"107":6,"108":2,"179":72,"180":4,"181":608,"182":3,"183":283}}],["promptengineerfortask",{"0":{"150":1}}],["prompts",{"0":{"12":1,"13":1,"35":1,"41":1,"103":1},"2":{"12":1,"13":2,"14":1,"17":2,"24":5,"74":1,"75":1,"100":2,"103":1,"105":2,"181":10}}],["prompt",{"0":{"89":1,"92":1,"105":1},"2":{"10":2,"17":3,"23":2,"24":2,"27":2,"31":2,"36":1,"42":3,"52":3,"63":1,"74":2,"85":2,"86":2,"88":2,"89
":3,"92":3,"93":1,"97":1,"98":1,"100":1,"103":1,"105":2,"106":2,"108":8,"110":2,"112":2,"114":2,"115":2,"117":2,"118":2,"120":2,"121":2,"122":3,"124":2,"125":2,"126":2,"127":2,"128":2,"130":2,"131":2,"132":2,"134":2,"136":2,"137":2,"138":2,"140":2,"141":2,"142":2,"144":3,"145":3,"147":2,"149":5,"150":9,"152":3,"153":2,"155":3,"156":3,"157":1,"159":1,"160":2,"161":2,"162":2,"163":2,"164":2,"165":2,"166":2,"167":2,"168":2,"169":2,"170":2,"171":2,"172":2,"173":2,"174":2,"176":3,"177":3,"178":3,"179":3,"181":265,"183":3}}],["prob",{"2":{"181":4}}],["probabilities",{"2":{"181":1}}],["probability",{"2":{"174":4,"181":2}}],["probably",{"2":{"1":1,"11":2}}],["problems",{"2":{"58":2,"130":1,"169":1,"171":1,"172":1,"177":1,"181":2}}],["problem",{"2":{"41":1,"142":1,"169":1,"171":1,"172":1}}],["produce",{"2":{"36":1,"181":2}}],["production",{"2":{"182":1}}],["product",{"2":{"7":1,"16":1,"58":3,"181":5}}],["processor=rt",{"2":{"67":1,"183":1}}],["processor",{"2":{"67":11,"183":24}}],["processed",{"2":{"58":1,"181":2,"183":2}}],["processes",{"2":{"58":1,"67":1,"179":1,"181":1,"183":1}}],["process",{"2":{"21":1,"51":1,"56":1,"67":2,"74":2,"88":2,"91":2,"126":1,"128":1,"179":4,"181":9,"183":8}}],["processing",{"2":{"8":1,"58":1,"61":2,"67":1,"88":1,"97":1,"107":1,"179":1,"181":5,"183":1}}],["pro",{"2":{"14":1,"32":1,"33":1,"72":1,"97":1,"98":2,"181":2}}],["provide",{"2":{"2":2,"4":1,"5":2,"6":1,"7":2,"10":4,"12":1,"18":1,"19":2,"21":1,"23":2,"26":2,"30":1,"35":1,"37":1,"43":1,"51":2,"52":11,"54":1,"60":1,"61":3,"63":1,"65":1,"67":5,"69":1,"73":1,"86":1,"87":1,"89":1,"98":1,"102":1,"103":1,"105":2,"106":3,"108":6,"112":1,"114":1,"115":2,"117":3,"118":3,"120":1,"122":1,"126":1,"130":3,"138":1,"140":3,"141":4,"142":2,"149":1,"155":2,"161":1,"163":1,"165":1,"167":1,"179":16,"181":63,"183":12}}],["provides",{"2":{"2":1,"42":1,"48":1,"56":1,"58":1,"59":1,"114":1,"120":1,"156":1,"170":1,"171":1,"172":1,"178":1,"179":3,"181":3,"183":2}}],["provided",{"2":{"2":1,"7":1,"10":3,"15":1,"17":2,"20":1,"21":2,"24":2,"31":1,"49":1,"51":2,"52":7,"57":1,"58":2,"59":1,"61":2,"63":1,"66":3,"67":10,"89":1,"106":3,"107":1,"108":8,"110":2,"114":2,"115":2,"117":1,"118":1,"120":4,"121":9,"122":2,"124":2,"125":1,"127":1,"130":2,"132":1,"136":1,"137":3,"138":2,"140":1,"142":3,"144":1,"145":1,"147":1,"152":1,"153":1,"155":1,"157":2,"159":1,"161":2,"163":1,"165":1,"167":1,"169":2,"170":1,"174":2,"177":1,"178":2,"179":14,"181":103,"183":42}}],["provider",{"2":{"0":4,"27":2,"92":2,"101":1,"102":1,"107":1,"108":2,"181":1}}],["providers",{"0":{"0":1,"86":1,"101":1},"2":{"0":2,"23":1,"27":1,"67":1,"86":1,"92":1,"100":2,"101":2,"108":1,"181":3,"183":4}}],["providing",{"0":{"43":1},"2":{"0":1,"27":1,"28":1,"31":1,"43":1,"49":2,"67":2,"156":1,"179":2,"181":1,"183":2}}],["v3",{"2":{"183":3}}],["vocab",{"2":{"183":10}}],["vocabulary",{"2":{"67":1,"126":1,"183":8}}],["voyage",{"2":{"181":4}}],["voyager",{"2":{"70":1}}],["v2",{"2":{"37":1,"67":1,"183":1}}],["v1",{"2":{"28":1,"30":1,"31":1,"73":2,"181":9,"183":2}}],["v0",{"2":{"28":1,"108":1}}],["vcat",{"2":{"12":1}}],["vscodedisplay",{"2":{"13":2,"24":2,"181":4}}],["vscode",{"2":{"11":1,"13":1,"24":1,"181":2}}],["vs",{"2":{"10":1,"52":2,"58":1,"60":1,"67":1,"100":1,"179":2,"181":1,"183":2}}],["vidid",{"2":{"181":2}}],["video",{"2":{"155":3,"156":1}}],["videos",{"2":{"155":2,"156":2}}],["visible",{"2":{"153":1}}],["visits",{"2":{"179":6}}],["visit",{"2":{"75":1}}],["vision",{"2":{"10":1,"106":1,"181":4}}],["visualize",{"2":{"181":1}}],["visualization",{"2":{"61":1}}],["visual",{"0":{"151":1},"1":{
"152":1,"153":1},"2":{"72":1,"95":1}}],["vibrant",{"2":{"58":1,"181":2}}],["viewers",{"2":{"181":2}}],["view",{"2":{"52":1,"179":1,"181":2,"183":9}}],["via",{"0":{"80":1},"2":{"2":1,"6":1,"12":1,"16":1,"20":1,"23":2,"26":2,"28":1,"42":1,"52":3,"57":1,"61":1,"63":1,"66":1,"67":2,"73":1,"78":1,"84":1,"86":1,"95":1,"101":1,"108":3,"110":1,"117":1,"118":1,"137":1,"144":1,"145":1,"147":1,"179":3,"181":21,"183":9}}],["vect",{"2":{"183":2}}],["vectorstore",{"2":{"128":1}}],["vectors",{"2":{"67":1,"181":3,"183":5}}],["vectorized",{"2":{"7":1}}],["vector",{"2":{"4":1,"13":2,"16":1,"19":2,"24":3,"31":1,"45":4,"47":1,"52":3,"58":13,"61":1,"64":1,"67":18,"87":1,"88":1,"89":4,"91":1,"92":4,"93":1,"102":1,"105":1,"107":4,"108":1,"179":5,"181":175,"183":82}}],["ve",{"2":{"37":1,"42":1,"74":1,"84":1,"107":1}}],["vegetable",{"2":{"17":1,"181":2}}],["verification",{"2":{"183":1}}],["verify",{"2":{"181":1}}],["versus",{"2":{"183":1}}],["version=",{"2":{"89":1,"181":1}}],["versions",{"2":{"73":1,"93":1}}],["version",{"2":{"7":1,"13":2,"24":1,"42":1,"51":1,"52":1,"61":1,"67":1,"73":1,"84":1,"89":1,"98":1,"110":1,"112":1,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"130":2,"131":1,"132":1,"134":1,"136":1,"137":1,"138":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"149":1,"150":1,"152":1,"153":1,"155":2,"156":1,"157":1,"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"171":1,"172":1,"173":1,"174":1,"176":1,"177":1,"178":1,"179":1,"181":10,"183":3}}],["verbatim",{"2":{"157":3,"159":1}}],["verbosity",{"2":{"52":2,"67":1,"179":4,"183":2}}],["verbose=2",{"2":{"179":1}}],["verbose=true",{"2":{"67":1,"181":3,"183":1}}],["verbose=false",{"2":{"52":1,"179":1}}],["verbose",{"2":{"4":1,"6":1,"7":2,"10":1,"13":1,"21":1,"51":1,"52":6,"67":16,"106":1,"179":7,"181":42,"183":68}}],["very",{"2":{"10":1,"18":2,"21":1,"57":1,"74":1,"84":1,"106":1,"121":2,"164":1,"166":1,"169":1,"171":1,"172":1,"173":1,"177":1,"181":2,"183":1}}],["vararg",{"2":{"181":2}}],["varying",{"2":{"181":1}}],["variety",{"2":{"23":1,"26":1,"140":1}}],["various",{"0":{"9":1},"1":{"10":1,"11":1,"12":1,"13":1,"14":1,"15":1,"16":1,"17":1,"18":1,"19":1,"20":1,"21":1,"22":1,"23":1},"2":{"52":1,"63":1,"98":1,"173":1,"179":1,"183":2}}],["variables",{"2":{"12":1,"13":1,"24":1,"29":1,"52":2,"73":1,"79":1,"89":1,"92":1,"96":1,"97":1,"98":1,"114":1,"171":1,"172":1,"181":38}}],["variable",{"0":{"79":1},"2":{"0":1,"30":1,"31":1,"52":2,"54":1,"78":1,"79":3,"87":1,"92":1,"93":1,"95":3,"114":1,"142":1,"174":2,"181":14}}],["vanilla",{"2":{"108":1}}],["vanished",{"2":{"58":1,"181":1}}],["vast",{"2":{"58":1,"181":1}}],["valid",{"2":{"49":1,"67":1,"179":1,"181":1,"183":3}}],["validated",{"2":{"183":1}}],["validate",{"2":{"21":2,"51":1,"170":1,"178":1}}],["validation",{"0":{"21":1},"2":{"51":1}}],["value2",{"2":{"183":1}}],["value1",{"2":{"183":1}}],["value",{"2":{"7":2,"21":1,"51":1,"52":3,"114":1,"115":1,"171":1,"172":1,"174":2,"179":8,"181":18,"183":2}}],["values",{"2":{"6":1,"7":10,"52":1,"63":1,"114":1,"174":6,"179":1,"181":11,"183":6}}],["valuable",{"2":{"1":1}}],["vllm",{"2":{"0":1,"70":1,"181":1}}],["lucene",{"2":{"183":2}}],["l99",{"2":{"181":1}}],["l424",{"2":{"181":1}}],["l341",{"2":{"181":1}}],["l116",{"2":{"181":1}}],["l170",{"2":{"181":1}}],["l1007",{"2":{"181":1}}],["l1009",{"2":{"181":1}}],["l123",{"2":{"42":1}}],["lngpt3t",{"2":{"92":1}}],["l244",{"2":{"183":1}}],["l215",{"2":{"183":1}}],["l288",{"2":{"58":1,"181":1}}],["l252",{"2":{"58":1,"181":1}}],["llava",{"2":{"42":
1}}],["llamaindex",{"2":{"117":1,"118":1,"125":1,"127":1}}],["llama123",{"2":{"42":3}}],["llama2",{"2":{"37":1,"39":1,"42":2,"84":1,"85":1,"181":1}}],["llama",{"0":{"28":1},"2":{"0":1,"25":1,"28":3,"29":6,"70":2,"100":1,"101":1,"181":6}}],["ll",{"2":{"21":1,"23":1,"26":1,"32":1,"34":1,"51":1,"52":1,"61":1,"75":1,"76":1,"84":1,"99":1,"108":3,"130":1,"179":1,"181":5}}],["llmtextanalysis",{"2":{"89":1}}],["llm",{"2":{"10":3,"11":1,"13":1,"19":1,"22":1,"24":1,"49":2,"52":5,"65":1,"67":2,"77":1,"86":2,"100":1,"101":1,"106":4,"137":1,"179":7,"181":11,"183":3}}],["llms",{"2":{"10":1,"13":1,"19":1,"67":1,"83":1,"84":1,"103":2,"106":1,"108":1,"112":1,"181":2,"183":2}}],["lt",{"2":{"10":3,"37":1,"55":2,"58":1,"64":1,"67":4,"78":1,"79":1,"84":1,"86":1,"89":3,"95":1,"102":2,"103":1,"104":1,"105":3,"106":3,"108":1,"179":1,"180":2,"181":29,"183":31}}],["laptop",{"2":{"108":1}}],["launch",{"2":{"79":1,"84":1}}],["launching",{"2":{"79":1,"95":1}}],["launched",{"2":{"37":1}}],["latter",{"2":{"70":1}}],["latency",{"2":{"181":2,"183":3}}],["latest",{"2":{"13":1,"15":1,"24":4,"28":1,"61":2,"71":1,"73":1,"84":1,"110":1,"117":1,"118":1,"130":1,"168":1,"176":1,"179":1,"181":8}}],["later",{"2":{"2":1,"4":1,"7":1,"13":1,"47":1,"76":1,"108":1,"181":2,"183":2}}],["lament",{"2":{"58":1,"181":1}}],["langchain",{"0":{"90":1},"2":{"58":3,"90":1,"128":1,"181":3}}],["languages",{"2":{"32":1,"61":1,"181":2}}],["language",{"2":{"10":1,"13":3,"14":3,"21":1,"22":1,"24":6,"49":1,"52":2,"61":6,"100":1,"101":1,"103":1,"106":1,"124":2,"126":1,"128":1,"167":2,"168":2,"169":2,"170":1,"171":2,"172":2,"176":2,"177":2,"178":1,"179":2,"181":5,"183":3}}],["lazily",{"2":{"52":1,"179":1}}],["lazy",{"2":{"10":4,"21":2,"49":5,"51":2,"52":5,"88":1,"106":6,"179":20,"182":1}}],["layers",{"2":{"28":1}}],["largeint",{"2":{"88":3}}],["large",{"2":{"22":1,"30":1,"52":1,"57":1,"58":1,"61":1,"67":1,"100":1,"101":1,"103":1,"170":1,"178":1,"179":2,"181":2,"183":9}}],["larger",{"2":{"7":5,"57":1,"58":1,"67":1,"98":2,"163":1,"181":3,"183":1}}],["last",{"2":{"21":3,"49":3,"51":4,"52":32,"75":1,"87":4,"88":4,"107":1,"108":2,"130":1,"179":36,"181":40,"183":5}}],["lastly",{"2":{"10":1,"52":1,"179":1,"183":1}}],["lake",{"2":{"19":2}}],["labeling",{"2":{"161":2}}],["labeled",{"2":{"157":1}}],["labels",{"2":{"18":1,"152":1,"161":1}}],["label",{"2":{"18":1,"136":3,"161":4,"174":3}}],["lawyer",{"2":{"7":4}}],["led",{"2":{"120":1}}],["legend",{"2":{"61":1}}],["legacy",{"2":{"20":1,"181":2}}],["leetcode",{"2":{"58":1,"181":1}}],["less",{"2":{"52":1,"163":1,"167":1,"179":1}}],["leveraging",{"2":{"181":2}}],["leverages",{"2":{"23":1,"26":1,"54":1,"108":1,"179":1}}],["leverage",{"2":{"13":1,"14":1,"21":1,"52":1,"61":1,"102":1,"108":1,"165":2,"167":2,"179":1,"181":2}}],["level",{"2":{"52":2,"58":1,"63":3,"65":1,"67":2,"167":1,"179":4,"181":13,"183":4}}],["leaves",{"2":{"179":2}}],["leave",{"2":{"167":1,"181":1}}],["leaving",{"2":{"58":2,"181":2}}],["leadership",{"2":{"157":1}}],["leads",{"2":{"35":1,"181":2}}],["leaf",{"2":{"67":2,"183":2}}],["least",{"2":{"66":1,"67":1,"157":1,"181":3,"183":1}}],["learn",{"2":{"181":1}}],["learned",{"2":{"12":1,"107":1}}],["learning",{"2":{"10":1,"49":1,"52":1,"61":1,"106":1,"179":1}}],["lengths",{"2":{"183":1}}],["length=20",{"2":{"58":1,"181":1}}],["length=10",{"2":{"58":2,"67":1,"90":1,"181":2,"183":3}}],["length=10000",{"2":{"58":2,"181":2}}],["length=13",{"2":{"58":1,"181":1}}],["length",{"2":{"7":1,"8":1,"21":2,"24":1,"28":1,"51":2,"52":32,"57":7,"58":35,"61":1,"67":4,"90":1,"140":1,"179":49,"181":55,"183":30}}],["left",{"2":{"6
":1,"7":12,"52":1,"179":1,"181":2}}],["letters",{"2":{"152":1,"183":1}}],["letter",{"2":{"130":1,"131":1}}],["let",{"2":{"1":1,"2":2,"3":1,"5":1,"6":2,"7":2,"12":1,"19":1,"20":1,"22":1,"23":1,"24":3,"26":1,"30":1,"32":1,"37":1,"41":2,"51":1,"52":8,"58":1,"61":3,"65":1,"67":1,"74":1,"78":2,"87":1,"88":2,"89":1,"107":2,"108":7,"130":1,"131":1,"179":9,"181":8,"183":5}}],["lossless",{"2":{"183":1}}],["losses",{"2":{"179":1}}],["losing",{"2":{"41":1}}],["lot",{"2":{"56":1}}],["lower",{"2":{"52":3,"58":1,"67":1,"179":3,"181":1,"183":1}}],["lowercased",{"2":{"183":1}}],["lowercase",{"2":{"20":1,"52":2,"179":2,"181":2,"183":1}}],["low",{"2":{"52":1,"74":1,"179":1,"181":8,"183":5}}],["love",{"2":{"35":1}}],["loads",{"2":{"89":1,"181":5}}],["loading",{"2":{"67":1,"181":1,"183":1}}],["loaded",{"2":{"24":1,"79":1,"181":2,"183":2}}],["load",{"2":{"23":1,"24":6,"26":1,"59":1,"89":10,"105":1,"108":1,"181":35,"183":3}}],["longer",{"2":{"114":1,"163":1,"181":1,"183":1}}],["longest",{"2":{"57":6,"58":17,"181":17}}],["long",{"2":{"23":2,"24":1,"26":1,"27":1,"29":1,"52":3,"74":1,"86":1,"179":4,"181":3}}],["location",{"2":{"19":3,"93":1,"181":13}}],["localserver",{"2":{"181":1}}],["localserveropenaischema",{"2":{"0":1,"181":5}}],["localpreferences",{"2":{"80":1,"181":2}}],["locally",{"2":{"25":1,"43":1,"67":3,"70":1,"100":1,"101":1,"108":1,"181":2,"183":3}}],["localhost",{"2":{"23":1,"28":1,"65":3,"67":3,"181":7,"183":3}}],["local",{"0":{"37":1},"1":{"38":1,"39":1,"40":1,"41":1,"42":1,"43":1,"44":1,"45":1,"46":1,"47":1},"2":{"0":2,"23":1,"43":1,"52":1,"69":1,"73":1,"77":1,"78":1,"85":1,"179":1,"181":20,"183":1}}],["logprobs",{"2":{"181":4}}],["logged",{"2":{"67":1,"183":1}}],["logging",{"0":{"93":1},"2":{"52":2,"67":5,"179":6,"181":6,"183":16}}],["logical",{"2":{"142":1}}],["logically",{"2":{"142":1}}],["logic",{"2":{"58":1,"67":5,"102":1,"107":1,"142":1,"179":1,"181":9,"183":9}}],["logit",{"2":{"10":1,"18":1,"106":1,"181":12}}],["logo",{"2":{"20":1,"181":2}}],["logs",{"2":{"10":1,"67":2,"106":1,"181":3,"183":4}}],["log",{"2":{"7":1,"11":1,"67":1,"93":2,"181":33,"183":1}}],["loose",{"2":{"183":1}}],["loosely",{"2":{"103":1,"105":1,"130":1}}],["look",{"2":{"67":1,"74":1,"90":1,"107":1,"181":1,"183":2}}],["looking",{"2":{"23":1,"26":1,"61":3,"179":1,"183":1}}],["looks",{"2":{"15":1,"52":1,"66":1,"105":1,"181":4,"183":2}}],["lookups",{"2":{"183":1}}],["lookup",{"2":{"2":2,"8":1,"126":1,"181":2,"183":3}}],["loop",{"2":{"7":1,"24":1,"74":1,"179":2}}],["lifting",{"2":{"108":1}}],["lifecycle",{"2":{"179":1}}],["life",{"2":{"29":1,"78":2}}],["lightweight",{"2":{"183":1}}],["light",{"2":{"97":1,"108":1,"181":4}}],["libraries",{"2":{"61":1,"171":1,"172":1}}],["library",{"2":{"37":1,"84":1}}],["living",{"2":{"35":1,"181":1}}],["linux",{"2":{"173":3}}],["linuxbashexpertask",{"0":{"173":1}}],["links",{"2":{"181":8}}],["link",{"2":{"24":1,"37":1,"155":1,"181":1}}],["line",{"2":{"11":1,"24":1,"28":2,"79":1,"155":1,"156":1,"181":3,"183":1}}],["lines",{"0":{"2":1},"2":{"2":1,"52":3,"58":1,"181":9,"183":2}}],["linearalgebra",{"2":{"1":2,"16":2,"47":2,"59":1,"181":5,"183":5}}],["limitations",{"2":{"99":1,"181":2}}],["limits",{"0":{"76":1},"2":{"72":1,"74":4,"76":1,"179":1,"181":2}}],["limited",{"2":{"58":1,"174":1,"181":3}}],["limit",{"0":{"74":1},"2":{"14":1,"74":7,"76":4,"108":1,"181":9,"183":6}}],["listened",{"2":{"58":1,"181":1}}],["listed",{"2":{"7":2,"112":2}}],["list",{"2":{"10":1,"24":1,"37":2,"55":4,"58":1,"60":1,"61":1,"84":1,"106":1,"108":1,"130":3,"136":1,"138":1,"141":1,"142":1,"156":2,"180":4,"181":25,"183":12}}],["
literate",{"2":{"8":1,"24":1,"47":1}}],["likelyhood",{"2":{"181":2}}],["likely",{"2":{"10":2,"74":1,"77":1,"78":1}}],["like",{"0":{"86":1,"90":1},"2":{"2":2,"6":1,"7":2,"12":1,"13":1,"19":1,"21":1,"24":2,"25":1,"30":1,"31":1,"32":1,"35":1,"39":1,"42":2,"49":2,"51":1,"52":4,"58":1,"63":1,"66":1,"67":3,"86":1,"87":1,"88":1,"89":5,"93":4,"98":1,"108":1,"114":1,"162":1,"170":1,"171":1,"172":1,"178":1,"179":5,"181":25,"183":10}}],["kw2",{"2":{"183":2}}],["kw",{"2":{"183":3}}],["kwarg",{"2":{"10":1,"52":1,"63":3,"73":1,"78":1,"87":1,"106":1,"179":1,"183":4}}],["kwargs`",{"2":{"65":1}}],["kwargs=",{"2":{"20":1,"23":1,"27":1,"28":1,"37":2,"52":3,"61":1,"63":1,"93":1,"106":2,"108":2,"179":3,"181":10,"183":2}}],["kwargs",{"0":{"81":1},"2":{"0":2,"10":3,"21":2,"29":1,"51":2,"52":15,"61":2,"63":2,"65":22,"67":81,"86":1,"89":1,"92":1,"93":2,"106":3,"108":2,"179":33,"180":2,"181":193,"183":150}}],["king",{"2":{"55":2,"180":2}}],["kinds",{"2":{"7":3}}],["knows",{"2":{"67":1,"102":1,"183":1}}],["knowing",{"2":{"17":1}}],["knowledge",{"2":{"13":1,"15":1,"24":4,"59":2,"110":1,"117":1,"118":1,"164":1,"166":1,"168":1,"173":1,"176":1,"181":2}}],["know",{"2":{"12":1,"23":1,"24":2,"26":1,"30":2,"31":1,"74":1,"88":1,"110":2,"117":2,"118":2,"171":1,"172":1,"181":1}}],["known",{"2":{"10":1,"32":1,"58":1,"90":1,"106":1,"181":4}}],["k=5",{"2":{"183":1}}],["k=5`",{"2":{"183":1}}],["k=100",{"2":{"67":1,"183":1}}],["k=",{"2":{"7":10}}],["k",{"2":{"2":1,"6":2,"7":2,"28":1,"37":1,"65":2,"67":7,"183":29}}],["kept",{"2":{"42":1}}],["keeping",{"2":{"179":1}}],["keeps",{"2":{"6":1,"181":2}}],["keep",{"2":{"2":1,"8":1,"21":1,"27":1,"28":1,"52":1,"114":1,"155":1,"156":1,"159":1,"179":2,"181":1,"183":3}}],["key1",{"2":{"181":1}}],["keylocal",{"2":{"181":1}}],["keypreset",{"2":{"181":1}}],["key=env",{"2":{"23":1,"26":1}}],["keywordsprocessor",{"2":{"67":1,"181":1,"183":6}}],["keywords",{"2":{"67":5,"114":1,"115":2,"126":3,"161":1,"181":1,"183":27}}],["keywordsindexer",{"2":{"67":1,"181":1,"183":4}}],["keyword",{"0":{"65":1,"81":1},"2":{"10":5,"12":1,"14":1,"15":1,"24":2,"43":2,"49":1,"52":5,"63":2,"65":2,"67":11,"74":1,"86":1,"87":2,"89":1,"98":1,"105":1,"106":5,"115":1,"126":1,"149":1,"179":11,"181":65,"183":18}}],["keys",{"2":{"7":1,"23":1,"27":1,"61":1,"107":1,"108":1,"181":10,"183":10}}],["key",{"0":{"72":1,"73":3,"79":1,"80":1,"100":1},"1":{"101":1,"102":1,"103":1,"104":1,"105":1,"106":1},"2":{"0":3,"6":1,"7":9,"8":1,"23":5,"24":2,"26":2,"27":3,"29":3,"30":1,"31":1,"32":3,"52":2,"54":2,"55":4,"67":2,"72":3,"73":9,"78":3,"79":10,"80":5,"86":1,"95":12,"99":1,"100":1,"107":1,"108":2,"124":1,"125":1,"140":1,"144":1,"145":1,"147":1,"152":2,"155":3,"156":5,"161":1,"171":3,"172":3,"179":3,"180":5,"181":164,"183":17}}],["uct",{"2":{"179":12,"181":1}}],["ultimately",{"2":{"88":1}}],["u>",{"2":{"58":1,"181":6,"183":1}}],["u>promptingtools",{"2":{"58":1,"181":6,"183":1}}],["uint16",{"2":{"179":1}}],["uint64",{"2":{"45":2,"183":7}}],["uint8",{"2":{"45":1}}],["utils",{"2":{"58":1,"181":1}}],["utilized",{"2":{"114":1,"181":1}}],["utilizes",{"2":{"0":1}}],["utilizing",{"2":{"61":1}}],["utility",{"2":{"49":2,"57":1,"90":1,"107":1,"156":1,"181":6}}],["utilities",{"0":{"56":1},"1":{"57":1,"58":1},"2":{"21":1,"48":1,"49":2,"51":1,"52":1,"56":1,"57":1,"58":2,"59":1,"67":1,"179":2,"183":2}}],["ut",{"2":{"19":1}}],["unpack",{"2":{"183":4}}],["untyped",{"2":{"181":1}}],["until",{"2":{"21":3,"51":2,"52":2,"74":1,"179":1,"181":1}}],["unhealthy",{"2":{"181":1}}],["unable",{"2":{"181":1}}],["unanswered",{"2":{"58":1,"181":1}}],["unbiased",{"2":{"161":1
}}],["unchanged",{"2":{"181":1,"183":1}}],["unclear",{"2":{"141":2,"183":1}}],["uncommon",{"2":{"126":1}}],["unfortunately",{"2":{"108":2}}],["unwrapping",{"2":{"181":2}}],["unwraps",{"2":{"179":1,"181":1}}],["unwrap",{"2":{"93":1,"179":2,"181":11}}],["unnecessary",{"2":{"90":1,"120":1,"128":1}}],["unexpected",{"2":{"76":1}}],["unexported",{"2":{"48":1,"59":1,"82":1}}],["unlock",{"2":{"106":1}}],["unlike",{"2":{"75":1,"183":1}}],["unless",{"2":{"58":1,"78":1,"163":2,"171":2,"172":2,"181":5}}],["unspecified",{"2":{"181":6}}],["unspoken",{"2":{"58":1,"181":1}}],["unsuccessfully",{"2":{"52":1,"181":1}}],["unsafe",{"2":{"52":7,"179":2,"181":7}}],["unusable",{"2":{"37":1}}],["un",{"2":{"24":1,"37":1,"181":1}}],["unique",{"2":{"67":1,"114":1,"179":1,"181":10,"183":8}}],["universal",{"2":{"52":1,"179":1}}],["union",{"2":{"19":3,"31":1,"52":11,"58":1,"67":4,"108":1,"179":17,"181":138,"183":35}}],["units",{"2":{"183":1}}],["unitrange",{"2":{"45":1}}],["unit",{"2":{"19":2,"77":1,"100":1,"104":1,"130":1,"170":2,"178":2,"181":1,"183":5}}],["unicode",{"2":{"1":2,"59":1,"183":3}}],["unknown",{"2":{"17":3,"137":2,"181":5}}],["underscores",{"2":{"183":1}}],["understood",{"2":{"92":1,"142":1,"181":1}}],["understandable",{"2":{"121":1,"181":1}}],["understand",{"2":{"49":1,"52":1,"67":1,"99":1,"140":1,"142":1,"155":1,"156":1,"167":1,"181":2,"183":1}}],["understanding",{"0":{"81":1},"2":{"41":1,"99":1,"120":1,"140":1}}],["underlying",{"2":{"10":1,"24":1,"49":1,"52":1,"59":1,"106":1,"108":1,"161":1,"179":2,"181":2,"183":5}}],["under",{"2":{"2":1,"18":1,"23":1,"26":1,"28":1,"49":1,"58":1,"61":1,"89":2,"99":1,"107":2,"108":1,"181":7}}],["updating",{"2":{"181":1}}],["updates",{"2":{"61":1,"179":2,"181":2,"183":1}}],["updated",{"2":{"52":1,"179":4,"181":5}}],["update",{"2":{"20":1,"52":1,"73":1,"179":1,"181":7,"183":1}}],["upto",{"2":{"181":2}}],["upfront",{"2":{"65":1,"67":1,"183":1}}],["uppercase",{"2":{"183":1}}],["uppercased",{"2":{"114":1}}],["upper",{"2":{"52":2,"179":4}}],["uploads",{"2":{"20":1,"181":2}}],["upon",{"2":{"12":1,"52":3,"70":1,"156":1,"179":2,"181":2}}],["up",{"2":{"1":1,"8":1,"11":1,"15":1,"52":2,"54":1,"61":1,"66":1,"72":1,"75":1,"87":1,"88":1,"105":1,"107":1,"108":3,"110":1,"117":1,"118":1,"163":1,"179":3,"181":4,"183":5}}],["usable",{"2":{"100":1,"105":1}}],["usage",{"2":{"21":1,"36":1,"58":1,"71":1,"142":1,"179":1,"181":5,"183":1}}],["usd",{"2":{"74":1}}],["usually",{"2":{"52":1,"73":1,"105":1,"179":2,"181":4}}],["usual",{"2":{"29":1,"30":2,"31":2}}],["us",{"2":{"10":1,"21":2,"49":2,"51":2,"52":1,"61":1,"71":1,"106":1,"179":1}}],["using",{"0":{"22":1,"23":1,"24":1,"26":1,"27":1,"28":1,"29":1,"30":1,"31":1,"47":1,"86":1,"98":1},"2":{"1":4,"5":1,"7":2,"8":2,"10":1,"23":1,"24":4,"27":1,"37":1,"42":1,"47":1,"58":6,"59":2,"60":1,"61":4,"66":3,"67":7,"82":1,"85":1,"89":1,"90":1,"95":1,"96":2,"106":1,"108":2,"112":1,"114":1,"120":1,"130":1,"156":4,"170":2,"174":1,"177":1,"178":2,"179":8,"181":39,"183":40}}],["uses",{"2":{"10":2,"11":1,"13":1,"28":1,"49":1,"67":1,"90":1,"106":2,"131":1,"181":9,"183":34}}],["useful",{"2":{"10":1,"15":1,"17":1,"21":1,"51":1,"52":4,"58":3,"67":4,"84":1,"106":2,"117":2,"118":1,"144":1,"145":1,"171":1,"172":1,"176":1,"177":1,"178":1,"179":2,"181":16,"183":16}}],["users",{"2":{"167":1,"181":1}}],["user=",{"2":{"89":1,"93":1,"105":1,"181":3}}],["usermessagewithimages",{"2":{"104":1,"181":7}}],["usermessage",{"2":{"10":1,"12":4,"24":5,"35":1,"41":1,"49":1,"52":2,"87":2,"89":2,"92":3,"100":2,"104":2,"105":2,"106":1,"107":2,"179":14,"181":35}}],["user",{"2":{"10":3,"12":2,"
13":1,"15":1,"17":1,"24":1,"35":1,"36":1,"41":1,"49":1,"52":10,"59":1,"66":1,"78":1,"89":3,"92":3,"100":1,"102":1,"104":3,"105":1,"106":4,"107":1,"108":5,"110":1,"112":1,"114":2,"115":1,"117":1,"118":1,"120":2,"121":4,"122":2,"124":2,"125":2,"126":3,"127":1,"128":3,"130":6,"131":2,"132":1,"134":2,"136":3,"137":1,"138":3,"140":10,"141":6,"142":11,"144":4,"145":4,"147":4,"149":5,"150":3,"152":1,"153":1,"155":2,"156":2,"159":2,"160":1,"161":2,"162":1,"163":6,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":5,"171":2,"172":2,"173":1,"174":1,"176":1,"177":1,"178":9,"179":13,"181":68}}],["used",{"2":{"6":2,"7":1,"15":4,"16":1,"17":2,"18":1,"52":2,"58":4,"67":6,"71":2,"87":1,"93":1,"100":1,"130":1,"131":1,"174":1,"179":11,"181":97,"183":25}}],["use",{"2":{"0":1,"1":1,"2":5,"7":2,"8":3,"10":3,"12":2,"13":4,"15":2,"16":1,"17":1,"18":3,"19":2,"20":1,"21":4,"22":2,"23":3,"24":8,"25":1,"26":2,"27":1,"28":1,"29":4,"30":4,"31":4,"32":1,"33":1,"34":1,"37":1,"39":1,"42":1,"43":1,"46":1,"51":4,"52":9,"54":1,"55":1,"58":7,"59":1,"60":1,"61":1,"63":1,"64":1,"65":1,"66":1,"67":46,"69":2,"71":4,"74":1,"77":2,"78":6,"79":1,"80":1,"82":1,"83":1,"85":2,"86":1,"87":3,"88":4,"89":6,"91":2,"93":2,"97":3,"98":4,"99":1,"102":1,"106":2,"107":3,"108":6,"130":4,"132":1,"152":1,"155":9,"156":2,"161":1,"163":3,"164":1,"166":1,"167":1,"169":1,"171":1,"172":1,"173":1,"174":1,"179":19,"180":1,"181":138,"183":85}}],["urls",{"2":{"104":1,"181":3}}],["url=env",{"2":{"29":1}}],["url=provider",{"2":{"27":1}}],["url=",{"2":{"23":1,"28":1,"181":2}}],["url",{"2":{"0":3,"10":1,"20":2,"23":1,"27":4,"43":1,"65":3,"67":4,"86":3,"106":1,"180":1,"181":49,"183":7}}],["rules",{"2":{"183":1}}],["runtime",{"2":{"52":2,"179":2}}],["runs",{"2":{"52":2,"67":1,"73":1,"84":1,"179":1,"181":2,"183":3}}],["running",{"2":{"7":1,"22":2,"23":1,"24":1,"37":1,"61":1,"73":1,"84":3,"179":1,"181":2}}],["run",{"2":{"6":2,"7":3,"10":3,"14":1,"21":5,"22":1,"42":1,"49":3,"51":3,"52":12,"61":1,"64":1,"67":1,"73":1,"79":1,"80":1,"84":1,"88":2,"89":1,"92":5,"95":1,"98":1,"106":4,"108":2,"179":33,"181":57,"183":11}}],["ripple",{"2":{"58":1,"181":1}}],["river",{"2":{"58":1,"181":1}}],["right",{"2":{"7":11,"102":2,"108":2,"144":1,"179":1,"181":1,"183":4}}],["rm",{"2":{"24":1,"73":1}}],["rolls",{"2":{"183":1}}],["role=",{"2":{"181":8}}],["role",{"2":{"23":1,"27":1,"61":1,"92":2,"107":3,"156":1,"181":3}}],["root",{"2":{"52":3,"67":5,"179":11,"181":1,"183":8}}],["robust",{"2":{"21":1,"69":1,"95":1,"106":1}}],["robustness",{"2":{"13":1,"49":1}}],["row",{"2":{"7":3}}],["rows",{"2":{"6":1,"7":18,"183":5}}],["roughly",{"2":{"100":1,"183":3}}],["routines",{"2":{"67":2,"183":2}}],["routing",{"0":{"18":1},"2":{"18":1,"136":1,"138":1,"181":3}}],["routed",{"2":{"138":1}}],["route",{"2":{"138":1}}],["router",{"2":{"10":1,"106":1,"138":1}}],["routes",{"2":{"0":1}}],["rounds=3",{"2":{"183":1}}],["rounds=5",{"2":{"179":1}}],["rounds",{"2":{"52":1,"108":1,"130":1,"179":18,"183":1}}],["round",{"2":{"7":2,"179":5}}],["raises",{"2":{"181":1}}],["raised",{"2":{"52":1,"157":1,"181":2}}],["rainy",{"2":{"181":4}}],["rationale",{"2":{"183":2}}],["ratio",{"2":{"152":1,"179":1}}],["rating",{"2":{"6":1,"122":1,"183":3}}],["ratelimit",{"2":{"74":2}}],["rate",{"0":{"74":1},"2":{"74":3,"183":3}}],["rare",{"2":{"73":1}}],["radius",{"2":{"58":1,"181":6,"183":1}}],["raw=true",{"2":{"181":1}}],["raw",{"2":{"52":1,"55":2,"180":2,"181":1}}],["rand",{"2":{"52":2,"181":3,"183":4}}],["random",{"2":{"52":1,"179":2,"181":3}}],["range",{"2":{"30":1,"31":1,"58":1,"181":1}}],["ranked",{"2":{"183":5}}],["ranke
rmodel",{"2":{"183":1}}],["ranker",{"2":{"183":1}}],["rankgptresult",{"2":{"181":1,"183":4}}],["rankgptreranker",{"2":{"181":1,"183":4}}],["rankgpt",{"2":{"112":3,"183":9}}],["rankings",{"2":{"183":2}}],["ranking",{"0":{"111":1},"1":{"112":1},"2":{"8":1,"63":1,"112":1,"181":1,"183":15}}],["rankanswer",{"2":{"7":1}}],["rank",{"2":{"6":1,"112":4,"181":5,"183":40}}],["ranks",{"2":{"2":1,"183":3}}],["ragjuliaqueryhyde",{"0":{"124":1}}],["ragjudgeanswerfromcontextshort",{"0":{"122":1}}],["ragjudgeanswerfromcontext",{"0":{"121":1},"2":{"6":1,"183":2}}],["ragwebsearchrefiner",{"0":{"118":1},"2":{"183":2}}],["ragextractmetadatalong",{"0":{"114":1}}],["ragextractmetadatashort",{"0":{"115":1},"2":{"67":1,"183":4}}],["ragrankgpt",{"0":{"112":1},"2":{"183":1}}],["ragresult",{"2":{"52":3,"61":3,"66":2,"67":6,"181":1,"183":19}}],["ragcreateqafromcontext",{"0":{"120":1},"2":{"67":1,"183":1}}],["ragconfig",{"2":{"61":1,"63":1,"65":1,"67":3,"181":1,"183":10}}],["ragcontext",{"2":{"6":1}}],["ragquerysimplifier",{"0":{"128":1}}],["ragquerykeywordexpander",{"0":{"126":1}}],["ragqueryoptimizer",{"0":{"127":1},"2":{"67":1,"183":3}}],["ragqueryhyde",{"0":{"125":1},"2":{"65":3,"67":1,"183":4}}],["ragdetails",{"2":{"67":1,"183":1}}],["raganswerrefiner",{"0":{"117":1},"2":{"67":2,"183":4}}],["raganswerfromcontext",{"0":{"110":1},"2":{"67":2,"183":4}}],["ragtoolsexperimentalext",{"2":{"183":1}}],["ragtools",{"0":{"1":1,"183":1},"1":{"2":1},"2":{"1":3,"10":1,"57":2,"59":3,"63":1,"66":1,"67":6,"181":138,"182":2,"183":280}}],["rag",{"0":{"1":1,"2":1,"59":1,"62":1,"64":1,"109":1},"1":{"2":1,"60":1,"61":1,"62":1,"63":2,"64":2,"65":3,"66":2,"67":1,"110":1},"2":{"1":3,"2":1,"6":1,"7":2,"8":1,"10":2,"59":3,"60":5,"61":3,"64":1,"66":1,"67":8,"106":1,"110":1,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"182":1,"183":22}}],["r",{"2":{"2":1,"21":2,"51":2,"52":1,"61":1,"107":1,"179":1,"183":2}}],["rt",{"2":{"1":1,"4":3,"6":1,"59":1,"67":3,"183":21}}],["rewritten",{"2":{"181":2}}],["reveal",{"2":{"174":1}}],["revisions",{"2":{"141":1}}],["revise",{"2":{"140":3,"141":3,"142":3}}],["revised",{"2":{"127":1,"130":1,"181":2}}],["reviewing",{"2":{"181":2}}],["review",{"2":{"4":1,"8":1,"49":1,"130":1,"131":1,"140":1,"141":1,"142":1,"174":1}}],["rejected",{"2":{"152":1,"181":1}}],["reorganization",{"2":{"140":1}}],["reuse",{"2":{"89":1}}],["recipient",{"2":{"181":4}}],["reciprocal",{"2":{"181":2,"183":8}}],["recall",{"2":{"130":1,"171":1,"172":2}}],["recognized",{"2":{"183":1}}],["recognizes",{"2":{"105":1}}],["record",{"2":{"181":1}}],["recorded",{"2":{"67":1,"181":1,"183":1}}],["recommended",{"2":{"181":2}}],["recommendation",{"2":{"58":1,"181":1}}],["recommend",{"2":{"58":2,"90":1,"181":2}}],["recursive=true",{"2":{"73":1}}],["recursively",{"2":{"58":1,"181":1,"183":1}}],["recursivecharactertextsplitter",{"0":{"90":1},"2":{"58":3,"90":1,"181":3}}],["recursive",{"2":{"57":2,"58":8,"90":3,"130":1,"181":11,"183":1}}],["receiving",{"2":{"181":3}}],["receive",{"2":{"74":1,"76":1,"181":3,"183":2}}],["received",{"2":{"10":1,"52":1,"88":2,"106":1,"179":1,"181":9}}],["recent",{"2":{"179":2,"181":2}}],["recently",{"2":{"74":1}}],["reception",{"2":{"47":1}}],["requested",{"2":{"130":1,"171":1,"172":1,"177":1,"181":3,"183":4}}],["request",{"2":{"74":2,"77":1,"87":2,"88":1,"100":1,"103":1,"105":1,"107":1,"108":3,"130":3,"131":2,"141":4,"142":6,"181":25}}],["requests",{"0":{"75":1},"2":{"55":1,"74":7,"75":3,"180":2,"181":7}}],["requirement",{"2":{"108":2}}],["requirements",{"2":{"0":1,"140":1,"141":
1,"142":4,"144":1,"145":1,"147":1,"181":1,"183":1}}],["required",{"2":{"59":1,"107":2,"108":7,"150":1,"179":1,"181":13,"183":6}}],["require",{"2":{"15":1,"120":1,"181":1}}],["requires",{"2":{"0":1,"29":1,"30":1,"31":1,"56":1,"67":2,"91":1,"124":1,"141":1,"181":11,"183":7}}],["removing",{"2":{"179":2,"183":1}}],["removes",{"2":{"179":1,"181":6}}],["remove",{"2":{"52":5,"58":1,"179":4,"181":16,"183":1}}],["reminder",{"2":{"171":1,"172":1}}],["remaining",{"2":{"74":2}}],["remembers",{"2":{"181":1}}],["remembered",{"2":{"181":1}}],["remember",{"2":{"2":1,"10":1,"24":1,"35":1,"49":1,"73":1,"89":1,"103":2,"106":1,"107":1,"140":1,"141":1,"142":1,"181":4}}],["reducing",{"2":{"67":1,"183":5}}],["reduces",{"2":{"183":2}}],["reduce",{"2":{"67":1,"183":4}}],["red",{"2":{"21":1,"51":1,"52":2,"61":1,"179":2}}],["react",{"2":{"181":1}}],["reach",{"2":{"102":1,"179":1}}],["reaching",{"2":{"58":1,"140":1,"181":1}}],["reasonable",{"2":{"183":3}}],["reasonably",{"2":{"183":1}}],["reason",{"2":{"181":9}}],["reasoning",{"2":{"144":3,"174":1,"181":5}}],["real",{"2":{"179":5,"183":12}}],["really",{"2":{"19":1,"86":1,"181":4}}],["reader=",{"2":{"183":1}}],["reads",{"2":{"183":2}}],["readme",{"2":{"181":1}}],["readability",{"2":{"140":1}}],["ready",{"2":{"11":1,"84":1,"86":1,"181":1,"182":1}}],["readtimeout",{"2":{"10":1,"106":1,"181":12}}],["read",{"2":{"4":1,"11":1,"24":1,"61":1,"75":1,"92":1,"108":5,"114":1,"122":1,"130":1,"131":1,"155":1,"157":1,"181":1,"183":1}}],["regarding",{"2":{"144":1,"145":1,"147":1}}],["regards",{"2":{"89":2,"181":2}}],["regardless",{"2":{"7":2}}],["region",{"2":{"32":1,"181":2}}],["regions",{"2":{"32":1,"181":3}}],["registration",{"2":{"29":1}}],["registry",{"2":{"23":1,"26":1,"37":2,"39":1,"42":1,"107":1,"181":29}}],["registers",{"2":{"181":1}}],["registering",{"2":{"181":3}}],["register",{"2":{"27":2,"28":2,"29":2,"30":2,"31":2,"42":1,"85":2,"86":2,"93":3,"95":1,"105":1,"181":15,"183":1}}],["registered",{"2":{"23":1,"26":1,"30":1,"31":1,"89":1,"181":1}}],["regenerate",{"2":{"21":1,"51":1}}],["regex",{"2":{"19":1,"24":1,"67":1,"181":2,"183":6}}],["renders",{"2":{"181":5}}],["rendered",{"0":{"92":1},"2":{"92":1,"107":7,"181":2,"183":1}}],["render",{"2":{"13":1,"24":2,"92":6,"100":2,"102":1,"107":6,"181":24}}],["rendering",{"2":{"13":1,"92":4,"102":1,"181":5}}],["refusals",{"2":{"181":3}}],["refresh",{"2":{"130":1,"181":3}}],["refining",{"2":{"67":2,"183":5}}],["refines",{"2":{"179":1,"183":3}}],["refined",{"2":{"117":3,"118":3,"183":1}}],["refinements",{"2":{"179":1}}],["refinement",{"0":{"116":1},"1":{"117":1,"118":1},"2":{"183":2}}],["refiner",{"2":{"67":13,"183":24}}],["refine",{"2":{"64":1,"66":2,"67":4,"117":6,"118":6,"127":1,"179":1,"181":3,"183":17}}],["ref",{"2":{"58":1,"181":1}}],["reflections",{"2":{"140":1,"141":2,"142":1}}],["reflection",{"2":{"130":1,"131":1,"140":4,"141":4,"142":4}}],["reflecting",{"2":{"58":1,"130":1,"156":1,"181":1}}],["reflects",{"2":{"122":1}}],["reflect",{"2":{"12":1,"140":1,"141":1,"142":1}}],["referring",{"2":{"183":1}}],["referred",{"2":{"7":1,"29":1}}],["refers",{"2":{"10":1,"28":1,"49":1,"106":1,"179":1,"181":2,"183":2}}],["references",{"0":{"52":1,"55":1,"58":1,"67":1},"2":{"52":1,"63":1,"126":1,"179":1,"183":3}}],["reference",{"0":{"179":1,"180":1,"181":1,"182":1,"183":1},"2":{"4":1,"10":1,"81":1,"90":1,"92":1,"120":3,"181":10,"183":12}}],["refer",{"2":{"0":1,"86":2,"92":1,"130":1,"156":1,"181":6}}],["repetition",{"2":{"120":1}}],["repeats",{"2":{"130":2}}],["repeat",{"2":{"21":1,"51":1,"130":1,"155":1,"156":1,"163":1}}],["repeated",{"2":{"21"
:1,"51":1,"130":1,"181":2}}],["repeatedly",{"2":{"10":1,"49":1,"106":1,"179":1}}],["repo",{"2":{"86":1}}],["report",{"2":{"181":1}}],["reports",{"2":{"67":1,"140":1,"183":1}}],["reported",{"2":{"36":1,"181":1}}],["rephrasing",{"2":{"67":6,"126":2,"127":1,"183":19}}],["rephrases",{"2":{"127":1,"128":1,"183":2}}],["rephraser",{"2":{"65":7,"67":11,"183":25}}],["rephrased",{"2":{"61":1,"66":1,"67":2,"126":1,"128":1,"183":9}}],["rephrase",{"2":{"8":1,"64":1,"65":2,"66":2,"67":1,"124":1,"125":1,"127":2,"128":1,"181":3,"183":16}}],["representation",{"2":{"183":1}}],["representative",{"2":{"7":1}}],["represented",{"2":{"181":2,"183":8}}],["represents",{"2":{"157":1,"181":2,"183":1}}],["representing",{"2":{"52":1,"58":1,"67":3,"121":1,"181":90,"183":3}}],["reply",{"2":{"52":1,"87":1,"97":1,"140":1,"141":1,"142":1,"179":1,"181":3}}],["repl",{"2":{"24":2,"61":1,"73":3,"82":1,"98":1,"181":1}}],["replaces",{"2":{"183":1}}],["replaced",{"2":{"58":1,"107":1,"181":3,"183":1}}],["replacements",{"2":{"92":1,"181":6}}],["replacement",{"2":{"58":4,"107":1,"181":8}}],["replace",{"2":{"12":1,"57":2,"58":3,"67":1,"92":1,"100":1,"105":2,"107":3,"181":5,"183":2}}],["rescore",{"2":{"183":8}}],["resized",{"2":{"181":4}}],["resize",{"2":{"181":5}}],["reserved",{"2":{"171":1,"172":1,"181":3}}],["reset",{"2":{"74":1,"179":2,"181":1}}],["resets",{"2":{"74":1}}],["researcher",{"2":{"157":1,"161":1}}],["research",{"2":{"61":1,"69":1,"114":1}}],["res",{"2":{"67":1,"183":1}}],["resolutions",{"2":{"181":1}}],["resolution",{"2":{"181":1}}],["resolved",{"2":{"181":1}}],["resolves",{"2":{"130":1}}],["resolve",{"2":{"67":1,"183":4}}],["resource",{"2":{"181":3}}],["resources",{"2":{"23":1,"26":1,"61":1,"71":1,"72":1,"76":1,"77":1,"79":2,"95":2}}],["respect",{"2":{"183":1}}],["respective",{"2":{"183":1}}],["respectively",{"2":{"52":1,"58":1,"63":1,"179":2,"181":1}}],["resp",{"2":{"181":3}}],["respond",{"2":{"10":1,"52":2,"87":1,"106":1,"112":1,"127":1,"136":2,"138":2,"179":3,"181":2}}],["responses",{"0":{"88":1},"2":{"14":1,"24":1,"88":1,"107":1,"141":1,"157":10,"159":2,"179":2,"181":13}}],["response",{"2":{"7":1,"10":5,"49":1,"63":1,"66":5,"67":5,"74":1,"77":1,"87":4,"88":14,"97":1,"104":2,"106":5,"107":5,"108":8,"121":1,"130":1,"140":1,"141":2,"142":1,"170":1,"179":1,"180":1,"181":85,"183":8}}],["restrictive",{"2":{"171":1,"172":1}}],["restricted",{"2":{"6":1,"7":1}}],["restart",{"2":{"42":1,"52":1,"179":1}}],["rest",{"2":{"19":1,"55":1,"96":1,"103":1,"179":1,"180":1}}],["resulting",{"2":{"183":5}}],["results",{"2":{"7":10,"13":1,"19":1,"52":2,"55":8,"61":1,"66":1,"112":1,"117":1,"118":12,"124":1,"125":1,"127":2,"128":1,"131":2,"132":1,"179":6,"180":8,"181":2,"183":16}}],["result",{"2":{"2":1,"7":1,"52":3,"60":1,"61":7,"64":1,"65":2,"67":22,"106":2,"108":8,"179":9,"181":30,"183":70}}],["retain",{"2":{"179":1,"181":2}}],["retrive",{"2":{"63":1,"183":1}}],["retries=3",{"2":{"108":1}}],["retries=2",{"2":{"88":1}}],["retries`",{"2":{"52":1,"179":1}}],["retries",{"2":{"21":6,"51":3,"52":22,"108":2,"179":26,"181":12}}],["retrieving",{"2":{"127":1,"183":1}}],["retrievable",{"2":{"67":1,"183":1}}],["retrieval",{"0":{"1":1},"1":{"2":1},"2":{"1":1,"6":3,"7":5,"59":1,"63":2,"66":2,"67":13,"126":3,"128":1,"181":2,"182":1,"183":25}}],["retrieves",{"2":{"67":1,"183":1}}],["retriever=advancedretriever",{"2":{"67":1,"183":1}}],["retriever",{"2":{"64":1,"65":6,"67":35,"183":44}}],["retrieved",{"2":{"60":1,"63":1,"66":1,"67":1,"183":3}}],["retrieve",{"2":{"10":2,"60":2,"61":4,"63":2,"64":3,"65":1,"66":1,"67":14,"105":1,"181":1,"183":21}}],["retryi
ng",{"2":{"21":1,"49":2,"52":7,"88":2,"108":1,"179":7}}],["retry",{"2":{"21":3,"49":1,"51":3,"52":15,"108":6,"179":22,"181":12}}],["retryconfig",{"2":{"21":3,"49":1,"51":3,"52":10,"179":14,"181":1}}],["returning",{"2":{"181":2,"183":1}}],["returned",{"2":{"97":1,"108":2,"179":1,"181":15,"183":3}}],["returns",{"2":{"7":7,"10":5,"18":3,"52":12,"57":1,"58":5,"67":11,"73":1,"106":5,"107":2,"140":1,"141":1,"142":1,"179":16,"180":1,"181":78,"183":47}}],["return",{"2":{"2":1,"6":2,"7":1,"10":4,"18":1,"19":4,"24":1,"31":1,"52":6,"55":1,"58":1,"61":3,"63":2,"67":11,"77":1,"87":5,"88":5,"92":4,"93":1,"106":5,"108":11,"117":2,"118":3,"179":13,"180":1,"181":106,"183":33}}],["reranking",{"2":{"66":1,"67":8,"183":16}}],["reranker",{"2":{"63":2,"67":8,"183":19}}],["reranked",{"2":{"61":1,"183":5}}],["rerank",{"2":{"2":2,"8":2,"63":3,"64":1,"66":2,"67":5,"181":2,"183":28}}],["reload",{"2":{"89":3,"181":2}}],["relentless",{"2":{"58":1,"181":1}}],["releases",{"2":{"182":1}}],["release",{"2":{"35":1}}],["relevancy",{"2":{"112":1,"181":1}}],["relevance",{"2":{"6":1,"112":2,"121":2,"140":1,"183":8}}],["relevant",{"2":{"2":1,"60":1,"61":1,"63":1,"64":2,"66":3,"67":7,"96":1,"112":1,"114":1,"118":1,"121":2,"122":1,"126":3,"127":1,"128":1,"150":1,"155":1,"156":1,"179":2,"181":1,"183":11}}],["related",{"2":{"13":1,"75":1,"120":1,"126":2,"157":1,"181":2,"183":2}}],["relational",{"2":{"7":1}}],["rely",{"2":{"0":1}}],["re",{"2":{"1":1,"2":3,"7":1,"8":1,"10":1,"12":1,"20":1,"22":2,"23":1,"24":11,"26":1,"29":1,"31":1,"35":1,"41":1,"42":1,"49":1,"58":2,"61":1,"63":2,"65":1,"67":1,"73":1,"75":1,"78":1,"84":2,"93":2,"100":1,"104":1,"105":4,"106":1,"107":5,"108":3,"112":1,"114":1,"121":1,"122":1,"124":1,"131":1,"160":2,"164":1,"166":1,"167":1,"168":2,"173":1,"174":1,"176":2,"179":1,"181":16,"183":16}}],["gsk",{"2":{"181":1}}],["ggi",{"2":{"181":3}}],["gguf",{"2":{"28":1}}],["gnarled",{"2":{"58":1,"181":1}}],["glossy",{"2":{"181":1}}],["globally",{"2":{"49":1}}],["global",{"2":{"0":1,"42":1,"52":1,"181":3}}],["glittering",{"2":{"58":1,"181":1}}],["glasses",{"2":{"39":1}}],["glad",{"2":{"31":1}}],["gpu=99",{"2":{"37":2}}],["gpu",{"2":{"28":1,"37":1}}],["gpt4o",{"2":{"183":3}}],["gpt4v",{"2":{"20":2,"181":5}}],["gpt4",{"2":{"15":1,"97":1,"98":1,"181":5}}],["gpt4t",{"2":{"6":1,"7":2,"15":3,"17":1,"21":1,"51":1,"97":1,"181":5,"183":2}}],["gpt35",{"2":{"181":2}}],["gpt3t",{"2":{"107":1}}],["gpt3",{"2":{"15":1,"107":1,"181":5}}],["gpt",{"2":{"6":1,"7":1,"15":5,"93":4,"97":2,"98":1,"181":21,"183":5}}],["guidance",{"2":{"108":1}}],["guidelines>",{"2":{"178":2}}],["guidelines",{"2":{"140":1,"156":1,"163":3,"170":2,"171":1,"172":1,"178":1}}],["guides",{"2":{"75":1,"181":1}}],["guide",{"0":{"84":1},"2":{"22":1,"41":1,"52":2,"61":2,"69":1,"79":2,"95":1,"170":1,"178":1,"179":3,"181":2}}],["guarantees",{"2":{"88":1}}],["guardian",{"2":{"58":1,"181":1}}],["guardrails",{"2":{"52":1,"179":1}}],["guessed",{"2":{"52":1,"179":1}}],["guesser",{"2":{"52":3,"179":3}}],["guesses",{"2":{"21":1,"51":1,"52":2,"179":2}}],["guess",{"2":{"21":1,"51":2,"52":31,"179":31}}],["guessing",{"2":{"21":1,"51":2,"52":1,"179":1}}],["g",{"2":{"19":1,"52":2,"112":1,"136":1,"138":1,"141":1,"181":8}}],["giraffe",{"2":{"181":7}}],["github",{"2":{"58":1,"73":1,"79":1,"95":1,"112":1,"181":6,"183":5}}],["gitignore",{"2":{"11":3}}],["give",{"2":{"24":1,"52":1,"70":1,"88":2,"179":1,"183":1}}],["given",{"2":{"19":2,"58":4,"60":1,"67":2,"74":1,"117":2,"118":2,"127":1,"136":2,"138":2,"141":1,"150":1,"152":1,"157":1,"161":1,"165":1,"167":1,"169":1,"179":5,"181":34,"183":8}}],["gi
ves",{"2":{"5":1,"13":1,"100":1,"101":1,"117":1,"118":1,"181":2}}],["grammer",{"2":{"181":1}}],["grammatical",{"2":{"140":1}}],["grammar",{"2":{"108":1,"140":1,"181":1}}],["gracefully",{"2":{"181":6}}],["grasp",{"2":{"127":1}}],["granularity",{"2":{"61":1}}],["grab",{"2":{"52":1,"179":1}}],["gratefully",{"2":{"12":1}}],["grins",{"2":{"41":2}}],["group",{"2":{"88":1,"181":1,"183":4}}],["grow",{"2":{"12":1}}],["groq",{"2":{"0":1,"181":6}}],["groqopenaischema",{"2":{"0":1,"181":2}}],["greater",{"2":{"181":1,"183":1}}],["greatingpirate",{"2":{"89":5,"181":7}}],["great",{"2":{"11":1,"24":2,"58":2,"97":1,"162":2,"181":2}}],["gt",{"2":{"7":14,"10":3,"15":1,"21":3,"37":1,"42":2,"51":2,"52":4,"58":1,"64":16,"67":9,"78":1,"79":1,"80":1,"84":1,"85":2,"89":3,"95":1,"98":2,"100":10,"102":1,"105":3,"106":3,"107":2,"108":5,"179":7,"181":26,"183":15}}],["gamma",{"2":{"179":4,"181":1}}],["game",{"2":{"21":1,"51":1,"52":2,"174":1,"179":2}}],["gaps",{"2":{"163":1}}],["gaze",{"2":{"58":1,"181":1}}],["gauge",{"2":{"24":1}}],["gave",{"2":{"12":1}}],["gain",{"2":{"6":1}}],["garbage",{"2":{"2":2}}],["goes",{"2":{"181":1}}],["goals",{"2":{"140":3,"141":1}}],["goal",{"2":{"127":1,"130":1,"181":2}}],["going",{"2":{"65":1,"108":1}}],["got",{"2":{"52":2,"179":2,"181":1}}],["gotchas",{"0":{"36":1},"2":{"52":1,"179":1}}],["good",{"2":{"4":1,"8":1,"13":1,"24":2,"52":1,"76":1,"88":1,"150":1,"170":1,"178":1,"179":1,"181":3,"183":5}}],["googlegenaipromptingtoolsext",{"2":{"181":1}}],["googlegenai",{"2":{"32":2,"181":1}}],["google",{"0":{"32":1},"1":{"33":1,"34":1,"35":1,"36":1},"2":{"0":1,"11":1,"32":3,"36":1,"70":1,"114":1,"181":9}}],["googleschema",{"2":{"0":1,"181":2}}],["golden",{"2":{"4":1,"8":1}}],["go",{"2":{"2":1,"12":1,"41":3,"67":1,"72":1,"76":1,"84":2,"95":2,"105":1,"170":1,"178":1,"183":2}}],["germany",{"2":{"183":2}}],["genai",{"2":{"77":1,"182":1}}],["gensym",{"2":{"67":2,"183":3}}],["genie",{"2":{"61":1}}],["general",{"0":{"148":1},"1":{"149":1,"150":1},"2":{"10":1,"23":1,"26":1,"106":1,"108":1,"114":1,"120":1,"156":2,"170":2,"171":1,"172":1,"178":3,"181":1}}],["generally",{"2":{"7":1,"108":1}}],["generator",{"2":{"64":1,"65":1,"67":24,"183":25}}],["generating",{"2":{"10":1,"48":1,"67":11,"106":1,"152":1,"179":2,"181":18,"183":20}}],["generativeai",{"2":{"53":1}}],["generative",{"2":{"1":1,"56":1,"57":1,"60":1}}],["generation",{"0":{"1":1,"33":1,"38":1},"1":{"2":1,"34":1,"35":1,"36":1,"39":1,"40":1,"41":1,"42":1},"2":{"1":1,"6":1,"51":1,"59":1,"63":2,"66":3,"67":3,"108":1,"120":1,"179":2,"181":25,"182":1,"183":7}}],["generated",{"2":{"6":1,"8":1,"10":5,"18":1,"24":1,"47":1,"49":1,"52":2,"57":1,"58":1,"60":1,"61":1,"67":5,"71":1,"97":1,"106":2,"120":2,"179":2,"181":30,"183":10}}],["generates",{"2":{"2":1,"10":1,"21":1,"66":3,"67":1,"106":1,"124":1,"125":1,"181":5,"183":4}}],["generate",{"0":{"4":1},"2":{"0":1,"2":1,"3":1,"7":1,"8":1,"10":4,"49":3,"52":1,"57":2,"59":1,"60":3,"61":5,"63":2,"64":3,"66":2,"67":10,"88":2,"89":1,"91":1,"95":1,"106":2,"108":3,"120":1,"124":1,"126":1,"141":1,"142":1,"150":1,"152":4,"179":3,"181":34,"183":18}}],["genericwriter",{"0":{"165":1}}],["generictopicexpertask",{"0":{"164":1}}],["generictranscriptcritic",{"0":{"141":1}}],["generic",{"2":{"4":1,"105":1,"120":1,"141":1,"160":1,"164":1,"165":1}}],["gestures",{"2":{"41":1}}],["getpropertynested",{"2":{"67":2,"181":1,"183":5}}],["getindex",{"2":{"52":1,"179":1}}],["getting",{"0":{"73":2,"74":1,"75":1,"94":1},"1":{"95":1,"96":1,"97":1,"98":1},"2":{"22":1,"155":1}}],["get",{"2":{"10":1,"21":1,"32":1,"46":1,"51":2,"52":3,"55":2,"59":1,"
61":1,"64":5,"66":5,"67":11,"69":1,"72":2,"73":2,"78":1,"79":1,"80":1,"84":1,"85":1,"87":1,"88":4,"95":1,"97":1,"106":1,"107":1,"108":2,"179":4,"180":2,"181":83,"183":60}}],["gemini",{"2":{"0":1,"32":3,"33":2,"34":3,"35":1,"36":1,"181":11}}],["tf",{"2":{"183":1}}],["td",{"2":{"183":1}}],["tp",{"2":{"183":1}}],["tpl=pt",{"2":{"89":1,"181":2}}],["tpl",{"2":{"24":1,"89":2,"92":2,"181":2}}],["tsang",{"2":{"179":1}}],["tldr",{"2":{"152":6,"167":1}}],["tl",{"2":{"39":1}}],["tmixtral",{"2":{"30":2,"108":2}}],["tmp",{"2":{"24":1}}],["tmps",{"2":{"13":1,"181":2}}],["typically",{"2":{"181":3}}],["typing",{"2":{"20":1,"181":2}}],["typed",{"0":{"88":1},"2":{"88":2,"96":1}}],["type=fruit",{"2":{"181":1}}],["type=food",{"2":{"10":1,"31":1,"106":1,"108":1}}],["type=maybetags",{"2":{"183":1}}],["type=manymeasurements",{"2":{"19":1,"181":2}}],["type=mymeasurement",{"2":{"181":5}}],["type=currentweather",{"2":{"19":1}}],["types",{"2":{"12":1,"52":2,"63":2,"64":4,"66":1,"67":7,"82":1,"85":1,"88":4,"104":1,"106":1,"108":4,"171":2,"172":2,"179":2,"181":16,"183":8}}],["type",{"2":{"6":1,"10":1,"11":1,"15":1,"19":2,"52":3,"60":1,"61":1,"63":2,"65":2,"66":1,"67":3,"88":2,"93":1,"97":1,"100":1,"106":1,"108":21,"130":1,"171":1,"172":1,"179":8,"181":153,"183":81}}],["tiktokenizer",{"2":{"181":1}}],["titles",{"2":{"155":2,"156":2}}],["title",{"2":{"152":2,"155":2,"156":1,"161":1,"167":1}}],["tiniest",{"2":{"132":1}}],["tinyrag",{"2":{"183":2}}],["tiny",{"2":{"23":3,"26":3,"181":2,"183":1}}],["tier",{"0":{"78":1},"2":{"74":3,"78":1,"181":2}}],["timing",{"2":{"52":1,"179":1}}],["timed",{"2":{"52":1,"179":1}}],["timeout",{"2":{"52":3,"181":8}}],["timestamp",{"2":{"155":2,"156":3,"181":4}}],["timestamps",{"2":{"155":3,"156":2}}],["times",{"2":{"21":1,"49":2,"51":1,"52":3,"89":1,"179":4}}],["time",{"2":{"3":1,"7":1,"10":1,"13":1,"21":1,"24":3,"31":1,"36":1,"51":1,"52":3,"58":1,"71":1,"77":2,"79":1,"85":1,"91":1,"100":1,"103":1,"105":1,"106":1,"179":4,"181":29,"182":1,"183":3}}],["tired",{"2":{"19":1}}],["tips",{"2":{"21":1,"58":1,"155":2,"156":2,"181":2}}],["tip",{"2":{"14":1,"58":1,"72":1,"97":1,"98":2,"115":1,"181":1}}],["tell",{"2":{"174":1}}],["tedious",{"2":{"85":1}}],["tens",{"2":{"77":1}}],["tenth",{"2":{"75":1}}],["tends",{"2":{"90":1}}],["tend",{"2":{"10":1,"58":1,"70":1,"85":1,"106":1,"181":1}}],["terms",{"2":{"114":1,"126":2}}],["term",{"2":{"59":1,"114":1,"183":1}}],["terminal",{"2":{"28":1,"37":1,"57":1,"61":1,"79":4,"84":1,"95":2}}],["testing",{"2":{"181":5}}],["testechoopenaischema",{"2":{"181":2}}],["testechoollamaschema",{"2":{"181":2}}],["testechoollamamanagedschema",{"2":{"181":2}}],["testechogoogleschema",{"2":{"181":2}}],["testechoanthropicschema",{"2":{"181":2}}],["test`",{"2":{"130":1,"170":3,"178":3}}],["tests>",{"2":{"178":2}}],["testset`",{"2":{"170":1,"178":1}}],["testsets",{"2":{"170":1,"178":1}}],["testset",{"2":{"52":1,"130":1,"170":2,"178":2,"181":1}}],["tests",{"2":{"52":4,"130":1,"170":4,"178":4,"181":5}}],["test",{"2":{"42":1,"52":2,"67":5,"170":14,"174":1,"178":14,"181":9,"183":11}}],["teacher",{"2":{"120":1}}],["teach",{"2":{"41":1}}],["technical",{"2":{"114":2,"126":1}}],["technically",{"2":{"28":1}}],["technique",{"2":{"91":1}}],["techniques",{"2":{"61":1}}],["technology",{"2":{"41":1,"161":1}}],["tempdir",{"2":{"181":1}}],["temporary",{"2":{"181":1}}],["temperature=>float64",{"2":{"181":1}}],["temperature=0",{"2":{"10":1,"52":3,"93":1,"106":3,"108":2,"179":3,"181":3}}],["temperature",{"2":{"19":1,"179":2,"181":20}}],["temperatureunits",{"2":{"19":2}}],["templating",{"2":{"13":1,"98":1
,"181":1}}],["template",{"0":{"89":1,"110":1,"112":1,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"130":1,"131":1,"132":1,"134":1,"136":1,"137":1,"138":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"149":1,"150":1,"152":1,"153":1,"155":1,"156":1,"157":1,"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"171":1,"172":1,"173":1,"174":1,"176":1,"177":1,"178":1},"2":{"13":5,"17":1,"20":1,"24":10,"52":1,"63":2,"65":5,"67":17,"89":13,"92":1,"93":1,"105":7,"107":4,"126":1,"130":1,"131":1,"132":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"149":1,"155":1,"156":1,"157":1,"162":1,"163":1,"171":1,"172":1,"179":9,"181":129,"183":47}}],["templated",{"0":{"13":1},"2":{"17":1,"24":1}}],["templates=true",{"2":{"181":1}}],["templates",{"0":{"105":1,"109":1,"111":1,"113":1,"116":1,"119":1,"123":1,"129":1,"133":1,"135":1,"139":1,"143":1,"146":1,"148":1,"151":1,"154":1,"175":1},"1":{"110":1,"112":1,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"130":1,"131":1,"132":1,"134":1,"136":1,"137":1,"138":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"149":1,"150":1,"152":1,"153":1,"155":1,"156":1,"157":1,"176":1,"177":1,"178":1},"2":{"10":1,"13":2,"17":1,"24":13,"89":12,"92":2,"93":1,"98":1,"100":1,"105":3,"106":1,"179":4,"181":48}}],["text=",{"2":{"183":1}}],["textchunker",{"2":{"67":1,"181":1,"183":7}}],["text1",{"2":{"58":1,"181":1}}],["text2",{"2":{"58":2,"181":2}}],["texts",{"2":{"58":2,"67":3,"140":1,"181":2,"183":6}}],["textanalysis",{"2":{"8":1}}],["text",{"0":{"33":1,"38":1,"56":1},"1":{"34":1,"35":1,"36":1,"39":1,"40":1,"41":1,"42":1,"57":1,"58":1},"2":{"2":3,"8":1,"10":6,"16":1,"19":2,"20":1,"30":1,"31":2,"52":3,"56":2,"57":7,"58":41,"61":1,"67":9,"90":2,"104":1,"106":4,"108":1,"114":7,"115":6,"140":8,"152":2,"153":1,"163":4,"165":1,"167":1,"179":1,"181":84,"183":36}}],["trove",{"2":{"89":2,"181":2}}],["troubleshooting",{"2":{"37":1}}],["treated",{"2":{"181":2}}],["treasure",{"2":{"89":2,"181":2}}],["trees",{"2":{"52":1,"179":3}}],["tree",{"2":{"18":2,"21":2,"49":1,"52":7,"58":1,"67":2,"88":1,"179":17,"181":7,"183":4}}],["truncation",{"2":{"183":2}}],["truncated",{"2":{"181":4,"183":1}}],["truncates",{"2":{"179":1}}],["truncate",{"2":{"179":2,"181":2,"183":17}}],["trusted",{"2":{"61":1}}],["truths",{"2":{"58":1,"181":1}}],["true",{"2":{"4":1,"6":2,"7":1,"12":2,"17":3,"21":1,"35":1,"45":1,"51":1,"52":32,"55":1,"67":17,"88":4,"89":2,"92":2,"137":2,"179":25,"180":1,"181":135,"183":58}}],["traced",{"2":{"181":2}}],["tracemessage",{"2":{"181":1}}],["trace",{"2":{"181":2}}],["tracers",{"2":{"181":3}}],["tracerschema",{"2":{"93":7,"181":17}}],["tracer",{"2":{"181":102}}],["tracermessagelike",{"2":{"181":2}}],["tracermessage",{"2":{"93":2,"181":10}}],["tracing",{"0":{"93":1},"2":{"93":2,"181":16}}],["tracked",{"2":{"181":1,"183":1}}],["tracker",{"2":{"67":9,"181":1,"183":28}}],["tracking",{"2":{"179":1,"181":2}}],["tracks",{"2":{"67":1,"181":1,"183":1}}],["track",{"2":{"67":5,"179":1,"181":1,"183":16}}],["trained",{"2":{"155":1,"156":1,"161":1}}],["train",{"2":{"71":1}}],["training",{"2":{"71":1,"181":1}}],["trailing",{"2":{"181":2}}],["trail",{"2":{"58":1,"181":1}}],["transcripts",{"2":{"155":2,"156":2}}],["transcript",{"2":{"140":5,"141":6,"142":4,"155":7,"156":6,"161":6}}],["transcribe",{"2":{"20":2,"153":2,"181":8}}],["transformation",{"2":{"181":1}}],["transformations",{"0":{"123":1},"1":{"124":1,"125":1,"126":1,"127":1,"128":1},"2":{"125":1}}],["transform",{"2
":{"67":1,"181":1,"183":2}}],["translates",{"2":{"181":2}}],["translate",{"2":{"14":1,"181":2,"183":6}}],["tryparse",{"2":{"52":4,"88":2,"179":4,"181":1}}],["try",{"0":{"78":1},"2":{"18":1,"31":1,"41":1,"52":2,"67":1,"89":1,"108":4,"110":1,"117":1,"118":1,"124":1,"125":1,"179":1,"181":9,"183":4}}],["trying",{"2":{"12":1,"35":1,"41":1,"49":1,"179":1,"181":5}}],["trial",{"2":{"183":1}}],["trims",{"2":{"181":1}}],["tries",{"2":{"179":1,"181":2,"183":3}}],["triple",{"2":{"130":1,"131":1,"157":1,"181":1}}],["trivially",{"2":{"88":1}}],["trigram",{"2":{"67":3,"181":1,"183":8}}],["trigramannotater",{"2":{"67":4,"181":1,"183":10}}],["trigrams",{"2":{"57":4,"67":6,"181":2,"183":23}}],["triggers",{"2":{"52":1,"108":1,"179":2}}],["triggered",{"2":{"49":1,"52":1,"179":1}}],["trigger",{"2":{"10":1,"49":1,"52":2,"106":1,"179":3,"181":2}}],["tricks",{"2":{"78":2}}],["trick",{"2":{"10":1,"18":1,"41":1,"106":1,"181":5}}],["tuning",{"2":{"91":1,"181":1}}],["tune",{"0":{"91":1}}],["tuned",{"2":{"31":1}}],["tuple",{"2":{"45":1,"179":1,"181":39,"183":5}}],["tuples",{"2":{"18":1,"171":1,"172":1,"181":5}}],["turn",{"0":{"87":1},"2":{"35":2,"42":1,"98":1,"181":4}}],["turbo",{"2":{"7":1,"15":3,"93":1,"97":2,"107":1,"108":2,"181":11}}],["tutorials",{"2":{"61":2}}],["tutorial",{"2":{"8":1,"72":1,"95":1,"96":1}}],["t",{"2":{"6":1,"8":1,"24":1,"27":1,"28":1,"30":1,"37":1,"39":1,"52":2,"85":1,"103":1,"108":2,"110":3,"114":1,"115":1,"117":5,"118":3,"120":1,"155":2,"156":1,"157":1,"170":1,"172":1,"174":1,"178":1,"179":5,"181":37,"183":23}}],["tweak",{"2":{"2":2,"7":1,"52":2,"179":2}}],["two",{"0":{"2":1},"2":{"2":1,"5":4,"6":1,"7":7,"10":1,"16":1,"17":3,"21":1,"51":2,"52":2,"57":2,"58":2,"74":1,"88":5,"92":1,"107":1,"108":1,"130":1,"179":4,"181":19,"183":6}}],["taking",{"2":{"126":1,"128":1}}],["taken",{"2":{"181":2}}],["takes",{"2":{"74":1,"181":1}}],["take",{"2":{"7":1,"24":2,"102":1,"107":1,"131":1,"132":1,"155":1,"159":1,"177":1,"179":1,"181":1}}],["target",{"2":{"67":2,"140":1,"165":1,"167":2,"174":2,"183":14}}],["tapestry",{"2":{"58":2,"181":2}}],["tavilysearchrefiner",{"2":{"181":1,"183":6}}],["tavily",{"2":{"54":3,"55":1,"180":5,"181":5,"182":1,"183":3}}],["tall",{"2":{"19":2,"181":11}}],["tabular",{"2":{"13":1,"181":2}}],["table",{"2":{"7":2,"24":1,"77":1,"174":4}}],["tables",{"2":{"7":1}}],["task>",{"2":{"177":4}}],["tasked",{"2":{"126":1,"128":1}}],["task=",{"2":{"20":1,"181":2}}],["tasks",{"2":{"14":2,"15":1,"17":1,"19":1,"23":1,"26":1,"30":1,"34":1,"42":1,"46":2,"61":1,"67":2,"69":1,"98":1,"136":1,"138":1,"157":1,"162":1,"179":1,"181":3,"183":8}}],["task",{"0":{"154":1},"1":{"155":1,"156":1,"157":1},"2":{"11":1,"19":1,"24":1,"61":1,"91":1,"93":1,"104":1,"105":1,"108":3,"117":1,"118":1,"121":1,"124":1,"125":1,"126":2,"127":1,"128":1,"130":1,"131":1,"136":1,"138":1,"140":1,"141":1,"142":1,"149":1,"150":6,"152":1,"153":4,"161":1,"162":4,"169":7,"170":1,"171":6,"172":6,"174":5,"177":6,"178":1,"181":6}}],["tag2",{"2":{"67":1,"183":1}}],["tag1",{"2":{"67":1,"183":1}}],["tagging",{"2":{"67":2,"183":4}}],["tagger=opentagger",{"2":{"67":1,"183":1}}],["tagger",{"2":{"67":27,"183":43}}],["tag",{"2":{"2":1,"61":1,"66":1,"67":4,"183":30}}],["tags",{"2":{"2":3,"64":4,"66":8,"67":17,"144":2,"145":1,"177":3,"178":2,"181":9,"183":108}}],["tailored",{"2":{"1":1,"140":1,"150":1}}],["txtouterjoin",{"2":{"7":1}}],["txtrightjoin",{"2":{"7":1}}],["txtleftjoin",{"2":{"7":1}}],["txtinnerjoin",{"2":{"7":1}}],["txtin",{"2":{"7":1}}],["txtwe",{"2":{"7":1}}],["txtjulia",{"2":{"7":3}}],["txtdatabase",{"2":{"7":1}}],["txt",{"2":{"2":2,"5":
1,"6":1}}],["tokyo",{"2":{"181":12}}],["tokenizes",{"2":{"183":2}}],["tokenized",{"2":{"183":2}}],["tokenizers",{"2":{"183":1}}],["tokenizer",{"2":{"181":1}}],["tokenize",{"2":{"57":2,"181":1,"183":2}}],["tokens=",{"2":{"181":1}}],["tokens=2500",{"2":{"20":1,"181":2}}],["tokens",{"2":{"11":1,"20":2,"22":1,"23":1,"26":1,"30":1,"31":1,"36":1,"67":2,"74":4,"77":1,"87":1,"97":2,"98":1,"181":70,"183":21}}],["token",{"2":{"10":1,"18":1,"29":1,"67":1,"77":1,"106":1,"179":1,"181":64,"183":11}}],["toml",{"2":{"80":1,"181":2}}],["touches",{"2":{"66":1}}],["total",{"2":{"61":1,"67":6,"179":1,"181":1,"183":14}}],["toy",{"2":{"52":1,"108":1,"179":1}}],["tone",{"2":{"24":1,"140":2,"163":1}}],["took",{"2":{"52":1,"108":1,"179":1,"183":1}}],["too",{"0":{"75":1},"2":{"16":1,"47":1,"52":1,"58":1,"108":1,"120":1,"155":2,"171":1,"172":1,"179":2,"181":5}}],["toolmessage",{"2":{"181":2}}],["tool",{"2":{"12":1,"21":1,"22":1,"106":1,"108":4,"144":4,"181":130}}],["tools",{"0":{"48":1,"59":1},"1":{"49":1,"50":1,"51":1,"52":1,"60":1,"61":1,"62":1,"63":1,"64":1,"65":1,"66":1,"67":1},"2":{"10":1,"58":2,"60":1,"61":1,"108":2,"181":34}}],["today",{"2":{"7":1,"22":1,"23":1,"26":1,"30":1,"40":1,"42":1,"87":1,"181":8}}],["topics",{"2":{"23":1,"26":1,"30":1,"31":1,"163":1}}],["topic",{"2":{"5":2,"6":1,"7":2,"161":6,"164":5,"167":5}}],["top",{"2":{"2":1,"6":2,"7":12,"52":1,"60":1,"61":1,"63":1,"65":4,"67":22,"85":1,"174":1,"179":1,"181":6,"183":46}}],["to",{"0":{"18":1,"78":1,"87":1,"88":1,"89":1,"92":1},"2":{"0":1,"1":3,"2":11,"3":1,"4":2,"5":4,"6":5,"7":17,"8":2,"10":21,"11":13,"12":9,"13":6,"14":2,"15":4,"16":4,"17":3,"18":2,"19":6,"21":11,"22":1,"23":6,"24":20,"25":1,"26":5,"27":1,"28":6,"29":7,"30":3,"31":6,"32":1,"34":5,"35":6,"36":1,"37":7,"39":3,"40":2,"41":2,"42":6,"43":1,"46":1,"47":1,"48":1,"49":16,"51":11,"52":100,"54":3,"55":9,"56":1,"57":14,"58":43,"59":6,"60":9,"61":20,"63":9,"64":1,"65":4,"66":14,"67":159,"69":1,"70":1,"71":7,"72":2,"73":2,"74":4,"75":3,"76":4,"77":2,"78":6,"79":5,"80":1,"82":2,"83":3,"84":3,"85":3,"86":6,"87":8,"88":18,"89":17,"90":2,"91":4,"92":11,"93":6,"95":9,"96":1,"97":4,"98":2,"99":3,"100":3,"101":1,"102":9,"103":6,"105":6,"106":21,"107":7,"108":31,"110":2,"112":4,"114":3,"117":8,"118":8,"120":4,"121":5,"122":2,"124":4,"125":3,"126":3,"127":6,"128":1,"130":10,"131":5,"132":2,"136":2,"138":5,"140":10,"141":5,"142":7,"144":2,"145":1,"147":1,"150":2,"152":2,"155":14,"156":10,"157":6,"161":1,"163":1,"164":1,"165":2,"166":1,"167":5,"169":1,"170":4,"171":2,"172":2,"173":1,"174":5,"177":1,"178":4,"179":146,"180":10,"181":843,"182":3,"183":431}}],["together",{"0":{"30":1},"2":{"0":1,"2":1,"5":3,"6":1,"7":3,"19":1,"30":3,"36":1,"67":1,"70":1,"93":1,"99":1,"108":6,"114":1,"181":7,"183":2}}],["togetheropenaischema",{"2":{"0":1,"30":2,"108":1,"181":2}}],["thomsonsampling",{"2":{"179":1}}],["thompson",{"2":{"179":3}}],["thompsonsampling",{"2":{"179":6,"181":1}}],["thoroughly",{"2":{"120":1}}],["thought",{"2":{"144":1,"169":1,"171":1,"177":1,"181":2}}],["though",{"2":{"108":1}}],["those",{"2":{"61":1,"67":1,"97":1,"144":1,"145":1,"147":1,"156":1,"181":2,"183":1}}],["than",{"2":{"28":1,"58":2,"67":2,"98":2,"108":1,"130":1,"163":1,"181":8,"183":8}}],["thanks",{"2":{"105":1,"179":2}}],["thank",{"2":{"12":1}}],["that",{"2":{"0":2,"3":1,"5":1,"6":3,"7":15,"10":2,"13":2,"15":3,"16":1,"18":1,"19":1,"21":4,"22":3,"23":3,"24":6,"26":2,"32":1,"36":2,"37":2,"42":2,"48":1,"49":5,"51":4,"52":22,"54":1,"58":6,"59":2,"61":1,"63":3,"65":3,"66":1,"67":23,"69":1,"72":1,"74":4,"75":1,"84":3,"87":2,"88":1,"89":1,"91":1,
"92":2,"93":1,"95":1,"96":1,"99":1,"100":3,"101":2,"102":1,"103":2,"105":4,"106":3,"107":2,"108":10,"110":1,"112":1,"117":2,"118":2,"120":2,"122":1,"124":3,"125":2,"126":5,"128":1,"130":1,"136":2,"138":3,"140":1,"141":1,"142":3,"150":2,"152":1,"155":3,"156":1,"157":2,"161":1,"163":1,"165":1,"167":2,"169":1,"171":2,"172":2,"174":2,"177":1,"179":25,"181":103,"182":1,"183":68}}],["third",{"2":{"21":1,"51":1,"52":1,"179":1}}],["think",{"2":{"101":1,"108":1,"130":1,"131":2,"132":1,"165":1,"167":1,"169":1,"170":1,"177":1,"178":1,"181":2,"183":1}}],["thinking",{"2":{"21":2,"51":2,"52":3,"179":3}}],["things",{"2":{"12":1,"35":1}}],["this",{"0":{"6":1},"2":{"0":2,"1":2,"2":1,"3":1,"4":1,"6":2,"7":2,"8":1,"10":1,"11":2,"13":1,"17":1,"21":2,"24":3,"32":1,"35":1,"37":1,"41":2,"47":1,"49":2,"52":10,"54":2,"56":1,"58":4,"61":1,"63":1,"65":2,"67":6,"71":1,"73":2,"74":1,"75":2,"78":1,"79":1,"89":1,"93":1,"95":1,"96":1,"99":2,"104":1,"106":1,"107":2,"108":2,"124":1,"126":4,"127":1,"128":1,"130":1,"131":1,"132":1,"142":1,"155":2,"156":2,"157":3,"171":2,"172":2,"174":2,"179":15,"181":81,"182":2,"183":32}}],["throw==false",{"2":{"52":1,"179":1}}],["throw=true",{"2":{"52":2,"179":2}}],["thrown",{"2":{"52":1,"73":1,"179":1}}],["throw",{"2":{"52":4,"108":1,"179":4,"181":6}}],["throughout",{"2":{"96":1}}],["through",{"2":{"0":1,"7":1,"11":1,"35":1,"52":1,"58":1,"99":1,"114":1,"169":1,"170":1,"177":1,"178":1,"179":3,"181":5,"183":2}}],["thread",{"2":{"87":1,"93":2,"181":22}}],["threads`",{"2":{"61":1}}],["threads",{"2":{"46":1,"67":7,"181":1,"183":28}}],["threshold",{"2":{"67":1,"183":4}}],["three",{"2":{"12":1,"17":1,"57":1,"63":1,"85":1,"88":1,"140":2,"141":2,"142":2,"181":4,"183":3}}],["then",{"2":{"10":2,"11":1,"12":1,"19":1,"28":1,"37":1,"47":1,"52":2,"58":1,"63":1,"66":1,"67":4,"87":1,"88":2,"89":2,"93":1,"99":1,"106":1,"107":2,"179":1,"181":14,"183":13}}],["theory",{"2":{"7":1,"174":1}}],["their",{"2":{"7":2,"10":1,"23":1,"26":1,"31":1,"52":1,"58":1,"61":2,"67":3,"71":1,"102":1,"106":1,"112":3,"115":1,"136":1,"138":1,"140":1,"141":1,"179":2,"181":14,"183":12}}],["there",{"2":{"7":2,"19":1,"23":3,"24":1,"26":2,"27":1,"30":1,"31":1,"34":1,"37":1,"39":2,"40":1,"41":1,"42":2,"52":5,"54":1,"57":1,"58":3,"66":1,"67":2,"69":1,"70":1,"73":1,"78":1,"84":1,"85":1,"92":2,"101":1,"102":1,"104":1,"105":1,"107":2,"130":2,"131":1,"163":1,"179":5,"181":18,"183":4}}],["themselves",{"2":{"181":1}}],["themed",{"2":{"161":1}}],["theme",{"0":{"158":2,"159":2},"1":{"160":2,"161":2,"162":2,"163":2,"164":2,"165":2,"166":2,"167":2,"168":2,"169":2,"170":2,"171":2,"172":2,"173":2,"174":2,"175":2,"176":2,"177":2,"178":2},"2":{"157":6,"161":7}}],["themes",{"2":{"152":1,"157":5,"161":1}}],["them",{"2":{"2":4,"3":1,"6":1,"7":1,"8":1,"10":1,"12":1,"16":1,"21":1,"23":2,"24":6,"26":1,"27":1,"36":1,"37":1,"52":4,"57":1,"58":3,"61":1,"63":1,"65":1,"67":5,"77":1,"78":1,"101":1,"105":1,"106":2,"108":1,"130":1,"155":3,"170":1,"171":1,"172":1,"174":1,"178":1,"179":4,"181":25,"183":6}}],["they",{"2":{"1":1,"10":1,"20":1,"21":2,"23":2,"24":2,"26":1,"27":1,"52":4,"57":1,"67":1,"103":1,"106":2,"108":1,"141":1,"155":1,"156":1,"159":2,"174":2,"179":4,"181":9,"183":3}}],["these",{"2":{"0":1,"10":1,"15":1,"37":1,"52":1,"58":1,"61":3,"67":2,"74":1,"78":1,"106":1,"114":1,"120":1,"121":1,"140":1,"142":2,"144":1,"145":1,"147":1,"156":3,"165":1,"167":1,"179":1,"181":5,"183":3}}],["the",{"0":{"7":1,"75":1,"79":1,"80":1,"81":1,"85":1,"92":1},"2":{"0":13,"1":4,"2":24,"3":3,"4":2,"5":7,"6":9,"7":91,"8":6,"10":39,"11":11,"12":8,"13":12,"14":2,"15":7,"16":5,"17":5,"
18":6,"19":10,"20":7,"21":22,"22":5,"23":16,"24":49,"25":1,"26":9,"27":8,"28":12,"29":6,"30":4,"31":9,"32":3,"33":2,"34":3,"35":5,"36":5,"37":4,"39":1,"41":6,"42":3,"43":3,"45":1,"46":1,"48":1,"49":33,"51":21,"52":194,"53":1,"55":15,"56":1,"57":14,"58":98,"59":9,"60":13,"61":48,"63":34,"64":2,"65":6,"66":29,"67":247,"69":3,"70":3,"71":5,"72":3,"73":15,"74":11,"75":3,"76":1,"77":4,"78":8,"79":7,"80":2,"82":1,"83":2,"84":3,"85":4,"86":9,"87":19,"88":18,"89":20,"90":3,"91":4,"92":19,"93":14,"95":6,"96":4,"97":14,"98":8,"99":5,"100":9,"101":4,"102":9,"103":5,"104":13,"105":9,"106":37,"107":35,"108":75,"110":6,"112":10,"114":10,"115":3,"117":26,"118":32,"120":17,"121":20,"122":5,"124":6,"125":6,"126":11,"127":7,"128":4,"130":31,"131":12,"132":2,"136":6,"137":2,"138":13,"140":21,"141":19,"142":26,"144":6,"145":4,"147":3,"149":1,"150":4,"152":9,"153":2,"155":14,"156":20,"157":17,"159":1,"160":1,"161":8,"163":15,"164":2,"165":7,"166":1,"167":9,"168":3,"169":5,"170":12,"171":6,"172":7,"173":1,"174":22,"176":4,"177":6,"178":13,"179":339,"180":16,"181":1653,"182":3,"183":841}}],["f1",{"2":{"183":2}}],["fn",{"2":{"181":2}}],["ffs",{"2":{"130":1,"131":1}}],["f2",{"2":{"52":2,"179":2,"183":2}}],["fmixtral",{"2":{"31":2}}],["f",{"2":{"21":2,"31":1,"51":2,"52":11,"108":1,"171":1,"172":1,"179":16,"181":7}}],["fences",{"2":{"181":2}}],["fence",{"2":{"130":1,"131":1,"181":2}}],["fear",{"2":{"41":1}}],["features",{"2":{"77":1,"142":1,"174":4,"181":2}}],["feature",{"2":{"17":1,"21":1,"174":7,"181":1}}],["february",{"2":{"181":1}}],["feb",{"2":{"31":1,"36":1}}],["feedbackfromevaluator",{"0":{"134":1},"2":{"179":3}}],["feedback",{"0":{"133":1},"1":{"134":1},"2":{"21":6,"49":6,"51":8,"52":55,"88":2,"108":5,"130":9,"131":4,"132":4,"134":5,"179":125,"181":4}}],["feel",{"2":{"23":1,"26":1,"31":1,"34":1,"42":1,"57":1,"60":1,"181":1}}],["feels",{"2":{"12":1}}],["feelings",{"2":{"12":1,"35":2,"41":2,"181":8}}],["fewer",{"2":{"183":1}}],["few",{"2":{"2":2,"6":1,"11":1,"86":1,"130":1,"155":1,"163":1,"179":2,"181":6}}],["flexibility",{"2":{"179":1}}],["flexible",{"2":{"52":2,"59":1,"179":2,"181":1}}],["fleming",{"2":{"120":3}}],["float",{"2":{"181":1,"183":5}}],["float32",{"2":{"61":4,"183":5}}],["float64",{"2":{"16":1,"19":1,"22":2,"45":2,"46":2,"47":2,"67":13,"179":1,"181":26,"183":35}}],["float64int64float64dict",{"2":{"7":1}}],["flow",{"2":{"64":4,"67":2,"88":1,"181":5,"183":3}}],["flowed",{"2":{"58":1,"181":1}}],["flows",{"2":{"35":1}}],["flashrank",{"2":{"183":4}}],["flashranker",{"2":{"181":1,"183":3}}],["flag",{"2":{"88":1,"179":2,"181":3,"183":10}}],["flags",{"2":{"15":1,"97":1}}],["flavors",{"2":{"10":1,"49":1,"106":1,"181":2}}],["flavor",{"2":{"0":1,"181":21,"183":1}}],["fruit",{"2":{"181":2}}],["friendly",{"2":{"30":1,"163":2}}],["francisco",{"2":{"19":1}}],["france",{"2":{"15":1,"67":2,"97":3,"107":4,"183":7}}],["frameworks",{"2":{"61":1}}],["frame",{"2":{"7":8}}],["frames",{"2":{"6":1,"7":7}}],["frequencies",{"2":{"183":1}}],["frequently",{"0":{"68":1},"1":{"69":1,"70":1,"71":1,"72":1,"73":1,"74":1,"75":1,"76":1,"77":1,"78":1,"79":1,"80":1,"81":1,"82":1,"83":1,"84":1,"85":1,"86":1,"87":1,"88":1,"89":1,"90":1,"91":1,"92":1,"93":1},"2":{"37":1,"181":1}}],["free",{"2":{"23":1,"26":1,"31":2,"34":1,"36":1,"42":1,"57":1,"60":1,"70":1,"74":2,"75":1,"77":1,"181":5,"183":1}}],["freedom",{"2":{"12":1}}],["french",{"2":{"14":1,"93":1,"181":3}}],["from",{"0":{"74":1,"82":1},"2":{"2":5,"3":1,"6":2,"7":13,"8":1,"10":6,"11":2,"12":2,"19":2,"20":1,"23":1,"25":1,"26":1,"31":1,"32":1,"35":1,"37":2,"41":1,"49":1,"51":1,"52":15,
"55":2,"57":2,"58":2,"59":1,"60":4,"61":2,"63":2,"64":1,"66":2,"67":17,"72":1,"78":1,"79":1,"82":2,"87":1,"88":1,"95":1,"97":1,"104":3,"106":4,"107":1,"108":6,"114":1,"115":2,"117":1,"118":4,"120":2,"121":3,"122":1,"124":1,"125":1,"126":1,"127":1,"128":2,"130":2,"134":2,"136":1,"138":1,"142":1,"150":1,"153":1,"155":1,"156":1,"157":3,"163":1,"171":1,"172":1,"174":2,"179":29,"180":2,"181":121,"183":57}}],["fairly",{"2":{"181":1}}],["fail",{"2":{"52":1,"179":3,"181":4}}],["failure",{"2":{"21":1,"49":3,"51":1,"52":1,"179":3}}],["failures",{"2":{"21":1,"51":1,"52":2,"179":1,"181":3}}],["fails",{"2":{"21":3,"49":1,"51":3,"52":2,"179":3,"181":8}}],["failedresponse",{"2":{"88":3}}],["failed",{"2":{"7":2,"19":1,"52":1,"88":2,"179":1,"181":3,"183":1}}],["favors",{"2":{"179":1}}],["favorite",{"2":{"84":1,"93":1}}],["far",{"2":{"179":2}}],["famous",{"2":{"167":1}}],["familiar",{"2":{"1":1}}],["faq",{"2":{"71":1,"95":1,"105":1}}],["fallback",{"2":{"181":7}}],["falls",{"2":{"52":1,"179":1}}],["fall",{"2":{"52":3,"179":3}}],["false`",{"2":{"52":1,"179":1}}],["false",{"2":{"2":1,"7":2,"17":2,"21":1,"51":1,"52":13,"55":3,"67":2,"137":2,"179":14,"180":3,"181":77,"183":16}}],["fahrenheit",{"2":{"19":1,"181":9}}],["faster",{"2":{"21":1,"28":1,"46":1,"51":1,"52":1,"67":1,"179":1,"183":1}}],["fast",{"2":{"18":1,"28":1,"181":1,"183":1}}],["face",{"2":{"42":1}}],["facilitating",{"2":{"181":2}}],["facilitate",{"2":{"10":1,"49":1,"52":1,"66":1,"106":1,"179":1,"181":2}}],["facing",{"2":{"15":1}}],["facts",{"2":{"172":1}}],["fact",{"2":{"10":1,"17":1,"49":1,"106":1}}],["focused",{"2":{"130":1,"156":1,"167":1}}],["focus",{"2":{"126":1,"163":1,"174":1,"179":1,"181":2}}],["focusing",{"2":{"61":1,"140":1}}],["four",{"2":{"17":1,"88":2,"181":2}}],["foundation",{"0":{"29":1},"2":{"29":1,"181":3}}],["found",{"2":{"7":1,"52":2,"67":2,"82":1,"179":1,"181":22,"183":6}}],["food",{"2":{"10":1,"31":5,"106":1,"108":24}}],["footers",{"2":{"2":1}}],["follow",{"2":{"86":1,"87":1,"130":1,"131":1,"140":2,"141":2,"142":2,"155":2,"157":1,"159":1,"163":4,"169":2,"174":1,"177":1,"181":1}}],["followed",{"2":{"24":1,"67":1,"183":1}}],["follows",{"2":{"7":1,"48":1,"53":1,"59":1,"117":1,"130":1,"181":1,"183":3}}],["following",{"2":{"5":1,"7":2,"11":1,"17":1,"24":1,"52":1,"57":1,"61":2,"82":1,"90":2,"96":1,"100":1,"117":1,"118":1,"131":1,"163":1,"179":2,"181":11,"183":1}}],["folder",{"2":{"2":1,"11":3,"13":1,"89":4,"181":4}}],["forget",{"2":{"181":1,"183":1}}],["forward",{"2":{"181":1}}],["forwarded",{"2":{"67":12,"183":16}}],["forbidden",{"2":{"170":1,"178":1}}],["forum",{"2":{"76":1}}],["forefront",{"2":{"69":1}}],["forever",{"2":{"58":1,"181":1}}],["formulate",{"2":{"120":1}}],["form",{"2":{"71":1,"181":1,"183":10}}],["former",{"2":{"58":1,"181":1}}],["forms",{"2":{"52":1,"179":1,"183":1}}],["format=",{"2":{"181":2}}],["format=dict",{"2":{"108":2}}],["formatting",{"2":{"51":1,"66":1,"92":1,"144":1,"145":1,"147":1,"153":1,"155":1,"156":1,"157":1,"163":1,"181":2,"183":3}}],["formatted",{"0":{"143":1,"175":1},"1":{"144":1,"145":1,"176":1,"177":1,"178":1},"2":{"0":2,"91":1,"92":1,"102":1,"107":1,"108":1,"144":1,"145":1,"157":1,"176":1,"177":1,"178":1,"179":1,"183":1}}],["format",{"2":{"10":1,"21":1,"51":1,"52":2,"66":1,"88":1,"91":2,"102":1,"104":1,"106":1,"107":2,"108":3,"112":1,"115":1,"130":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"150":1,"155":1,"156":1,"157":2,"161":2,"163":3,"165":1,"167":2,"179":2,"181":16,"183":1}}],["forth",{"2":{"41":1}}],["fortunately",{"2":{"6":1,"78":1,"108":1}}],["forces",{"2":{"144":1}}],["force=true",{"2"
:{"73":1}}],["force",{"2":{"12":1,"18":1,"24":1,"35":2,"41":1,"73":2,"181":3}}],["for",{"0":{"45":1,"77":1,"79":1,"84":1,"107":1,"108":1,"179":1,"180":1,"182":1,"183":1},"2":{"0":6,"2":6,"3":1,"4":2,"5":1,"6":2,"7":14,"8":1,"10":9,"12":2,"13":2,"14":2,"15":2,"16":1,"17":3,"18":7,"19":2,"21":7,"22":1,"23":5,"24":14,"26":2,"27":1,"30":5,"31":8,"32":2,"33":1,"35":2,"36":3,"37":3,"39":1,"41":2,"42":2,"47":1,"48":1,"49":6,"51":5,"52":30,"53":1,"54":1,"55":3,"57":2,"58":12,"59":1,"60":3,"61":28,"63":6,"65":3,"66":5,"67":65,"69":2,"71":2,"72":1,"74":1,"75":1,"77":4,"78":4,"79":3,"81":1,"82":1,"83":1,"84":1,"85":1,"86":3,"88":5,"89":4,"90":3,"91":2,"92":4,"93":1,"95":3,"97":3,"98":3,"99":1,"101":2,"102":2,"105":2,"106":10,"107":7,"108":13,"110":1,"114":2,"115":1,"117":1,"118":2,"120":4,"121":2,"122":1,"124":2,"125":1,"126":5,"127":2,"128":3,"130":3,"131":3,"136":2,"138":3,"140":1,"141":3,"142":2,"144":4,"145":4,"147":3,"149":2,"150":1,"152":2,"155":2,"156":6,"157":3,"160":1,"161":1,"162":1,"163":8,"164":1,"165":2,"166":1,"167":2,"168":1,"169":1,"170":7,"171":4,"172":4,"173":1,"174":1,"176":2,"177":2,"178":8,"179":65,"180":3,"181":407,"182":4,"183":267}}],["five",{"2":{"181":1}}],["fits",{"2":{"99":1,"136":1,"138":2}}],["fit",{"2":{"57":1,"58":2,"174":1,"181":2,"183":3}}],["fixes",{"2":{"179":1}}],["fixed",{"2":{"88":1,"179":1,"181":2}}],["fix",{"2":{"49":2,"52":1,"73":1,"130":1,"132":1,"179":4,"181":1}}],["fixing",{"0":{"51":1,"129":1},"1":{"130":1,"131":1,"132":1},"2":{"10":1,"48":1,"49":1,"52":4,"106":1,"108":1,"179":21}}],["field3",{"2":{"181":4}}],["field2",{"2":{"181":4}}],["field1",{"2":{"181":8}}],["fieldnames",{"2":{"108":1}}],["fields",{"2":{"24":1,"52":4,"63":1,"86":1,"88":1,"107":1,"108":2,"140":1,"141":1,"142":1,"179":6,"181":39,"183":21}}],["field",{"2":{"10":5,"49":1,"52":2,"67":1,"97":1,"106":6,"108":8,"179":3,"181":24,"183":2}}],["finished",{"2":{"181":2}}],["finish",{"2":{"141":2,"181":5}}],["finance",{"2":{"61":1}}],["finalizes",{"2":{"181":3}}],["finalize",{"2":{"181":18}}],["finally",{"2":{"66":1,"181":1}}],["final",{"2":{"6":1,"52":2,"61":2,"66":1,"67":5,"107":1,"121":1,"179":1,"183":21}}],["finetuning",{"2":{"91":5,"181":2}}],["fine",{"0":{"91":1},"2":{"31":1,"91":1,"181":1}}],["finders",{"2":{"183":1}}],["finder",{"2":{"67":10,"183":23}}],["findings",{"2":{"114":1}}],["finding",{"2":{"41":1,"67":2,"183":2}}],["find",{"2":{"2":1,"8":1,"11":1,"12":1,"13":1,"23":1,"26":1,"29":1,"35":1,"41":2,"49":1,"52":1,"57":2,"58":3,"61":5,"64":2,"66":3,"67":7,"93":1,"179":3,"181":21,"183":51}}],["finds",{"2":{"2":1,"67":1,"179":1,"183":14}}],["filled",{"2":{"181":2}}],["fill",{"2":{"71":1,"108":1,"163":1,"181":14,"183":1}}],["fills",{"2":{"7":1}}],["filechunker",{"2":{"181":1,"183":4}}],["filenames",{"2":{"67":1,"183":1}}],["filename",{"2":{"24":2,"91":1,"181":2}}],["file",{"2":{"4":1,"11":6,"24":3,"28":1,"32":1,"37":1,"67":4,"73":1,"79":1,"91":2,"93":1,"155":1,"156":1,"157":1,"181":23,"183":10}}],["files`",{"2":{"67":1,"183":1}}],["files",{"2":{"2":2,"24":1,"64":1,"67":6,"72":1,"95":1,"183":14}}],["filtered",{"2":{"61":1,"67":2,"183":4}}],["filtering",{"2":{"8":1,"17":1,"66":2,"67":2,"183":6}}],["filter",{"2":{"2":2,"7":1,"67":7,"114":2,"181":1,"183":13}}],["filters",{"2":{"2":2,"8":1,"66":1}}],["fired",{"2":{"170":1,"178":1}}],["firefunction",{"2":{"31":2}}],["fireworks",{"0":{"31":1},"2":{"0":1,"23":1,"27":1,"31":4,"70":1,"108":1,"181":2}}],["fireworksopenaischema",{"2":{"0":1,"31":2,"181":2}}],["first",{"2":{"1":1,"2":1,"6":1,"7":7,"10":2,"13":1,"15":1,"16":1,"23":1,"24":2,"26":1,"28":1,
"37":1,"39":1,"41":1,"52":2,"58":3,"63":2,"66":1,"67":2,"73":2,"74":1,"88":1,"89":3,"92":2,"106":2,"107":1,"112":1,"144":1,"165":1,"167":1,"169":1,"171":1,"172":1,"177":1,"179":3,"181":27,"183":14}}],["fur",{"2":{"181":1}}],["furthermore",{"2":{"108":1}}],["further",{"2":{"58":2,"88":2,"179":1,"181":2}}],["fusion",{"2":{"181":2,"183":8}}],["fulfills",{"2":{"142":1}}],["fulfilling",{"2":{"130":1}}],["fully",{"2":{"61":1,"67":1,"88":1,"142":3,"183":1}}],["full",{"2":{"7":1,"67":3,"88":1,"181":9,"183":10}}],["fuzzy",{"2":{"58":1,"181":1}}],["functor",{"2":{"52":1,"179":2}}],["functionality",{"2":{"48":1,"59":1,"142":1,"170":1,"178":1,"179":1,"181":3,"182":3,"183":3}}],["functionalities",{"2":{"0":1,"142":1,"181":1}}],["function",{"0":{"47":1},"2":{"6":1,"7":2,"8":1,"10":4,"12":1,"16":1,"17":1,"20":1,"21":5,"22":2,"24":1,"31":1,"45":1,"47":1,"49":2,"51":6,"52":37,"54":2,"55":1,"58":12,"61":2,"63":2,"64":1,"66":1,"67":20,"73":2,"87":1,"88":2,"89":1,"91":1,"92":1,"102":1,"106":4,"108":10,"112":1,"114":1,"130":3,"142":1,"145":3,"147":3,"170":3,"178":3,"179":58,"181":108,"183":42}}],["functions",{"0":{"10":1,"106":1},"2":{"0":2,"2":1,"7":1,"10":5,"15":1,"21":2,"23":1,"24":1,"27":1,"37":1,"49":4,"51":1,"52":7,"57":2,"60":1,"61":2,"63":5,"64":1,"66":1,"67":1,"73":1,"82":1,"85":1,"87":1,"89":2,"92":1,"105":1,"106":5,"169":1,"170":1,"171":1,"172":1,"177":1,"178":1,"179":8,"181":15,"183":12}}],["func",{"2":{"52":2,"179":4,"183":1}}],["future",{"2":{"1":1,"4":2,"23":1,"61":1,"89":1,"108":1,"179":1,"181":2,"182":1,"183":1}}],["ml",{"2":{"174":1}}],["mm",{"2":{"155":2,"156":3}}],["mdoel",{"2":{"107":1}}],["mdash",{"2":{"52":9,"55":1,"58":5,"67":6,"179":37,"180":2,"181":184,"182":1,"183":142}}],["m1",{"2":{"37":1}}],["mcts",{"2":{"21":1,"49":1,"52":1,"179":1}}],["m",{"0":{"78":1},"2":{"21":2,"23":1,"26":1,"28":3,"30":1,"31":2,"34":1,"37":1,"42":1,"51":2,"52":3,"87":3,"108":1,"179":3,"181":5}}],["msg1",{"2":{"181":2}}],["msgs",{"2":{"181":1}}],["msg=aigenerate",{"2":{"181":7}}],["msg",{"2":{"12":2,"16":5,"19":4,"20":2,"22":4,"23":2,"26":1,"27":1,"29":1,"31":2,"35":1,"40":2,"41":1,"45":1,"46":1,"47":2,"52":12,"61":1,"67":2,"78":2,"88":4,"93":5,"107":3,"108":2,"179":9,"181":152,"183":6}}],["myfield",{"2":{"181":1}}],["myfunction",{"2":{"181":2}}],["mystruct",{"2":{"181":1}}],["myschema",{"2":{"86":2}}],["mytemplates",{"2":{"179":1}}],["mytype",{"2":{"88":1}}],["myaijudgemodel",{"2":{"183":1}}],["myadd",{"2":{"170":6,"178":6}}],["myabstractresponse",{"2":{"88":5}}],["mybool",{"2":{"88":2}}],["mymodel",{"2":{"86":1}}],["mymeasurementwrapper",{"2":{"181":1}}],["mymeasurement",{"2":{"19":5,"181":35}}],["myreranker",{"2":{"63":4,"67":2,"183":2}}],["my",{"0":{"92":1},"2":{"12":1,"13":1,"23":3,"26":2,"31":1,"34":1,"35":1,"41":1,"74":1,"84":1,"86":1,"87":3,"108":1,"181":14}}],["music",{"2":{"155":1,"156":1}}],["must",{"2":{"1":1,"12":2,"21":2,"37":1,"41":2,"51":2,"52":9,"67":1,"89":3,"95":1,"105":1,"108":3,"115":1,"117":1,"118":1,"121":1,"125":1,"130":3,"131":1,"136":2,"138":2,"152":1,"155":1,"156":1,"157":2,"159":1,"163":1,"167":1,"170":2,"174":1,"178":2,"179":13,"181":18,"182":1,"183":8}}],["murmured",{"2":{"58":1,"181":1}}],["mutates",{"2":{"67":1,"183":1}}],["mutated",{"2":{"67":1,"183":4}}],["mutating",{"2":{"52":1,"66":1,"67":1,"179":2,"183":2}}],["mutable",{"2":{"52":1,"181":3}}],["multihits",{"2":{"183":1}}],["multihop",{"2":{"183":1}}],["multicandidatechunks",{"2":{"181":1,"183":2}}],["multifinder",{"2":{"67":1,"181":1,"183":4}}],["multiindex",{"2":{"67":2,"181":1,"183":14}}],["multiplier",{"2":{"183":5}}],["
multiplication",{"2":{"47":1}}],["multiple",{"0":{"46":1},"2":{"6":1,"8":1,"14":1,"21":1,"46":1,"52":3,"58":3,"63":1,"67":2,"88":1,"93":1,"98":1,"102":1,"105":1,"107":1,"108":1,"170":1,"171":2,"172":2,"178":1,"179":6,"181":27,"183":10}}],["multi",{"0":{"87":1},"2":{"35":1,"42":1,"67":3,"98":1,"179":2,"181":8,"183":7}}],["much",{"0":{"77":1},"2":{"7":2,"8":1,"13":1,"24":1,"41":1,"52":1,"67":2,"106":2,"108":1,"179":3,"181":3,"183":2}}],["mixed",{"2":{"108":1}}],["mix",{"2":{"106":1,"181":3,"183":1}}],["mixtral",{"2":{"28":1,"30":1,"31":2,"37":2,"108":2,"181":1}}],["million",{"2":{"98":2}}],["mickey",{"2":{"58":1,"181":1}}],["middleware",{"2":{"181":1}}],["middle",{"2":{"41":1,"112":1,"179":1,"183":1}}],["mimics",{"2":{"58":1,"181":1}}],["mimic",{"2":{"27":1,"88":1,"101":1,"179":2,"181":3}}],["mind",{"2":{"108":1}}],["minute",{"2":{"74":5}}],["minutes",{"2":{"11":2,"77":2,"181":3}}],["min",{"2":{"58":1,"67":8,"181":1,"183":12}}],["minimize",{"2":{"183":1}}],["minimal",{"2":{"59":1}}],["minimum",{"2":{"2":1,"58":2,"67":2,"78":1,"95":1,"181":2,"183":7}}],["minichunks",{"2":{"58":1,"181":1}}],["mini",{"2":{"52":2,"78":2,"179":2,"183":2}}],["mistakes",{"2":{"130":2}}],["mistrall",{"2":{"181":2}}],["mistralai",{"0":{"23":1,"26":1},"2":{"23":3,"25":1,"26":1,"27":1,"70":1,"79":2,"108":1,"181":5}}],["mistral",{"2":{"0":1,"22":3,"23":7,"26":7,"37":1,"40":1,"47":1,"84":2,"181":22}}],["mistralopenaischema",{"2":{"0":1,"23":2,"26":2,"181":4}}],["missing",{"2":{"7":1,"52":1,"141":1,"142":2,"163":1,"181":6}}],["might",{"2":{"7":1,"10":1,"23":2,"26":1,"52":2,"58":1,"75":2,"76":1,"79":1,"106":1,"179":2,"181":4}}],["madrid",{"2":{"97":1,"98":3}}],["made",{"2":{"49":1,"52":5,"120":1,"156":1,"179":6,"181":2}}],["mapped",{"2":{"181":1}}],["mapping",{"2":{"88":1,"181":4}}],["map",{"2":{"181":12,"183":1}}],["mapreduce",{"2":{"46":1}}],["magenta",{"2":{"61":1,"183":3}}],["mascarading",{"2":{"181":1}}],["mask",{"2":{"57":1}}],["mastering",{"2":{"61":1}}],["master",{"2":{"12":2,"35":1,"41":1,"181":5}}],["maintain",{"2":{"156":1,"181":2}}],["maintaining",{"2":{"153":1}}],["mainly",{"2":{"126":1}}],["main",{"2":{"49":2,"57":1,"60":1,"61":1,"63":3,"64":1,"67":1,"89":1,"104":1,"108":1,"161":1,"181":7,"182":1,"183":3}}],["machine",{"2":{"61":1}}],["machines",{"2":{"35":1}}],["mac",{"2":{"37":1,"79":1}}],["macros",{"2":{"52":1,"181":5}}],["macro",{"2":{"15":1,"34":1,"87":3,"97":1,"130":1,"181":8}}],["may",{"2":{"24":1,"34":1,"42":1,"89":2,"140":1,"142":1,"155":1,"179":1,"181":13,"182":1,"183":1}}],["maybeextract",{"2":{"19":1,"181":18}}],["marks",{"2":{"181":1,"183":1}}],["markup",{"2":{"181":1}}],["marked",{"2":{"120":1,"181":3}}],["markdown",{"2":{"20":3,"155":1,"156":1,"157":1,"163":1,"167":2,"181":20}}],["marsaglia",{"2":{"179":1}}],["mars",{"2":{"17":1,"181":2}}],["margin=",{"2":{"183":1}}],["margin",{"2":{"2":1,"183":4}}],["manner",{"2":{"155":1,"183":3}}],["management",{"2":{"181":1}}],["managed",{"2":{"179":1,"181":4}}],["manage",{"2":{"179":1}}],["manages",{"2":{"108":1,"179":1,"181":1}}],["manageable",{"2":{"58":1,"66":1,"179":1,"181":1}}],["managing",{"2":{"63":1}}],["manually",{"2":{"27":1,"28":1,"73":1}}],["manymeasurements",{"2":{"19":1,"181":2}}],["many",{"0":{"75":1},"2":{"19":1,"23":1,"27":1,"49":2,"57":1,"69":1,"70":1,"84":1,"88":1,"89":1,"102":1,"108":1,"124":1,"125":1,"181":8}}],["mandarin",{"2":{"14":1}}],["manipulations",{"2":{"162":1}}],["manipulation",{"2":{"2":1,"56":1,"67":1,"183":1}}],["matrices",{"2":{"183":5}}],["matrix",{"2":{"22":1,"46":2,"47":1,"66":1,"181":4,"183":29}}],["mat",{"2":{"183":2}
}],["matlab",{"2":{"61":1}}],["matter",{"2":{"36":1}}],["materialized",{"2":{"183":1}}],["materialize",{"2":{"45":1,"181":1}}],["material",{"2":{"12":1}}],["matches",{"2":{"181":5,"183":2}}],["matched",{"2":{"124":1,"125":1,"128":1,"183":2}}],["match",{"2":{"7":2,"11":1,"24":3,"58":7,"61":5,"66":1,"67":9,"181":9,"183":16}}],["matching",{"2":{"7":5,"58":1,"61":1,"67":3,"181":1,"183":9}}],["maximize",{"2":{"183":1}}],["maximum",{"2":{"18":1,"21":2,"52":4,"55":1,"58":4,"74":1,"114":1,"179":6,"180":1,"181":16,"183":3}}],["maxes",{"2":{"74":1}}],["max",{"2":{"8":1,"20":1,"21":4,"51":2,"52":21,"55":1,"57":1,"58":21,"67":3,"88":1,"90":1,"108":1,"179":29,"180":1,"181":51,"183":14}}],["makie",{"2":{"67":1,"183":1}}],["making",{"2":{"0":1,"100":1,"105":1}}],["makes",{"2":{"37":1,"106":1,"155":1}}],["make",{"2":{"4":1,"6":1,"7":1,"8":2,"11":1,"29":1,"37":2,"52":2,"56":1,"67":3,"74":1,"79":2,"89":1,"95":2,"108":2,"110":1,"117":1,"118":1,"138":1,"155":3,"156":1,"165":1,"167":1,"179":2,"181":8,"183":7}}],["mention",{"2":{"155":1}}],["mentioning",{"2":{"142":1,"174":1}}],["mentioned",{"2":{"121":1,"126":1,"142":1,"174":1}}],["merely",{"2":{"92":1,"181":1}}],["merged",{"2":{"183":2}}],["merges",{"2":{"183":3}}],["merge",{"2":{"67":2,"181":1,"183":5}}],["memory`",{"2":{"108":1}}],["memory",{"2":{"61":1,"130":1,"162":1,"183":2}}],["memories",{"2":{"12":1,"58":1,"181":1}}],["melody",{"2":{"58":1,"181":1}}],["meetings",{"2":{"155":2,"156":2}}],["meeting",{"2":{"142":1}}],["meets",{"2":{"140":2,"142":1}}],["meet",{"2":{"39":1,"40":1,"42":1,"142":1}}],["messaging",{"2":{"140":1,"181":3}}],["message=true",{"2":{"78":2}}],["message",{"0":{"34":1,"39":1},"2":{"21":1,"49":2,"51":1,"52":18,"61":1,"67":1,"75":1,"78":1,"84":2,"87":2,"89":2,"93":3,"97":2,"102":3,"107":2,"108":3,"130":1,"131":2,"134":1,"140":1,"149":1,"179":21,"181":183,"183":4}}],["message`",{"2":{"21":1,"51":1,"52":1,"179":1}}],["messagese",{"2":{"181":1}}],["messages",{"0":{"104":1},"2":{"12":2,"23":1,"24":1,"27":1,"36":1,"52":1,"78":1,"87":1,"88":1,"89":1,"91":1,"92":5,"93":1,"100":1,"103":1,"104":2,"107":6,"108":1,"179":4,"181":56}}],["mesages",{"2":{"24":1}}],["mechanisms",{"2":{"106":1}}],["mechanism",{"2":{"23":1,"26":1,"58":1,"108":1,"181":1}}],["medium",{"2":{"23":1,"26":1,"183":4}}],["measuring",{"2":{"183":4}}],["measurement",{"2":{"181":1}}],["measurements",{"2":{"19":2,"181":16}}],["measures",{"2":{"58":2,"67":1,"181":2,"183":1}}],["meantime",{"2":{"181":1}}],["meant",{"2":{"130":1,"131":1,"181":2}}],["meaningful",{"2":{"155":1}}],["meaning",{"2":{"78":2,"127":1,"183":1}}],["means",{"2":{"18":1,"32":1,"37":1,"57":2,"61":1,"67":2,"74":1,"181":2,"183":3}}],["mean",{"2":{"1":1,"7":2,"183":2}}],["me",{"2":{"16":2,"22":3,"23":1,"26":1,"30":2,"31":2,"34":1,"39":1,"42":1,"45":2,"46":4,"47":2,"58":1,"82":1,"88":2,"89":4,"174":1,"181":12}}],["meticulously",{"2":{"144":1,"145":1,"147":1,"156":1}}],["meta",{"2":{"93":1,"181":12}}],["metaprogramming",{"2":{"61":1}}],["metadatamessage",{"2":{"181":2}}],["metadata=true",{"2":{"2":1,"8":1}}],["metadata",{"0":{"113":1},"1":{"114":1,"115":1},"2":{"2":4,"8":1,"66":1,"67":1,"93":5,"114":2,"115":1,"126":1,"181":39,"183":14}}],["met",{"2":{"52":12,"88":2,"108":1,"179":16}}],["methoderror",{"2":{"108":1}}],["methods",{"2":{"52":3,"66":1,"67":1,"100":1,"108":1,"179":3,"181":6,"183":3}}],["method",{"2":{"10":1,"49":2,"52":1,"58":1,"63":2,"67":14,"86":1,"100":1,"101":1,"106":1,"179":39,"180":2,"181":126,"183":158}}],["metrics",{"2":{"6":1}}],["move",{"2":{"181":1,"183":1}}],["moved",{"2":{"1":1,"179":1,"182":1,"183
":1}}],["mock",{"2":{"179":1}}],["monitoring",{"2":{"181":2}}],["month",{"2":{"76":1}}],["monte",{"2":{"21":1,"49":1,"52":1,"179":3}}],["money",{"2":{"76":1,"181":1}}],["moonlight",{"2":{"58":2,"181":2}}],["mouse",{"2":{"58":1,"181":1}}],["modified",{"2":{"181":1,"183":1}}],["modifies",{"2":{"181":2}}],["modification",{"2":{"181":1}}],["modify",{"2":{"24":1}}],["modal",{"2":{"181":2}}],["modality",{"2":{"56":1}}],["modular",{"2":{"60":1,"179":1}}],["modules",{"2":{"57":1}}],["module",{"0":{"182":1},"2":{"1":1,"10":1,"21":1,"24":1,"48":2,"49":1,"52":4,"53":2,"54":1,"57":1,"59":2,"61":1,"106":1,"179":3,"181":4,"182":4,"183":2}}],["modes",{"2":{"183":1}}],["mode=true",{"2":{"181":1}}],["modern",{"2":{"120":2}}],["moderation",{"2":{"17":1}}],["mode",{"2":{"24":2,"67":1,"108":2,"181":28,"183":1}}],["model3",{"2":{"181":1}}],["model2",{"2":{"181":1}}],["model1",{"2":{"181":3}}],["modeling",{"2":{"61":1}}],["model>",{"2":{"27":1}}],["model=pt",{"2":{"67":1,"183":1}}],["model=",{"2":{"20":2,"21":1,"22":3,"23":3,"26":2,"27":1,"30":2,"31":3,"35":1,"37":1,"43":1,"51":1,"52":3,"65":1,"78":2,"93":3,"106":2,"107":1,"179":3,"181":47,"183":1}}],["modelspec",{"2":{"181":3}}],["models",{"0":{"22":1,"29":1,"37":1,"42":1,"78":1},"1":{"38":1,"39":1,"40":1,"41":1,"42":1,"43":1,"44":1,"45":1,"46":1,"47":1},"2":{"0":1,"10":1,"15":1,"17":1,"22":1,"23":4,"26":3,"28":2,"29":2,"32":1,"36":1,"37":4,"39":1,"42":1,"52":1,"53":1,"58":2,"67":2,"69":2,"70":1,"71":5,"75":1,"78":3,"84":3,"85":1,"86":1,"93":1,"98":1,"100":1,"101":1,"103":1,"106":1,"108":1,"144":1,"145":1,"171":1,"172":1,"176":1,"177":1,"178":1,"179":2,"181":38,"183":7}}],["model",{"0":{"0":1,"15":1,"85":1,"91":1,"101":1},"2":{"0":3,"6":2,"7":2,"10":8,"15":8,"16":1,"17":1,"18":1,"21":3,"22":1,"23":2,"26":1,"27":2,"28":4,"29":5,"30":4,"31":6,"32":2,"33":1,"34":2,"35":1,"37":3,"39":2,"40":2,"41":1,"42":6,"45":2,"46":3,"47":1,"49":8,"51":3,"52":16,"59":1,"60":3,"61":2,"63":2,"65":4,"67":43,"77":1,"78":2,"83":1,"84":1,"85":6,"86":3,"87":2,"88":1,"91":1,"92":2,"93":3,"97":3,"100":3,"101":1,"102":1,"103":1,"104":3,"106":8,"107":4,"108":17,"117":2,"118":4,"144":1,"150":1,"170":1,"174":5,"178":1,"179":19,"181":296,"183":123}}],["moment",{"2":{"1":1,"23":1,"27":1,"32":1,"43":1,"181":3}}],["mostly",{"2":{"91":1}}],["most",{"2":{"1":1,"8":1,"23":1,"26":1,"52":2,"61":1,"63":1,"66":2,"67":3,"83":1,"112":1,"114":2,"115":2,"125":1,"126":1,"136":1,"138":1,"150":1,"157":1,"161":1,"174":1,"179":4,"181":12,"183":10}}],["moreover",{"2":{"24":1}}],["more",{"2":{"0":2,"2":1,"5":3,"6":4,"7":4,"8":1,"10":1,"12":2,"13":3,"15":2,"16":1,"17":3,"19":2,"20":1,"21":2,"23":1,"24":3,"26":1,"28":1,"29":1,"37":1,"43":1,"52":4,"55":1,"56":1,"57":1,"58":1,"61":4,"66":1,"67":6,"74":1,"75":1,"76":1,"80":1,"81":1,"83":1,"84":1,"85":1,"88":1,"90":1,"92":1,"95":1,"98":2,"105":1,"106":2,"107":1,"108":5,"117":1,"126":1,"127":1,"130":1,"179":11,"180":1,"181":63,"183":34}}],["❌",{"2":{"0":26}}],["✅",{"2":{"0":46}}],["w",{"2":{"183":3}}],["wp",{"2":{"20":1,"181":2}}],["www",{"2":{"20":1,"181":2}}],["wraps",{"2":{"57":1,"181":8,"183":1}}],["wrap",{"2":{"57":2,"58":2,"93":6,"181":12,"183":1}}],["wrapped",{"2":{"93":1,"179":1,"181":1}}],["wrapper",{"2":{"19":1,"49":1,"52":3,"58":2,"67":1,"97":1,"179":3,"181":20,"183":2}}],["wrapping",{"2":{"53":1,"57":1}}],["wrong",{"2":{"51":1,"52":1,"88":1,"179":1,"181":2}}],["written",{"2":{"20":1,"140":3,"142":1,"181":2}}],["writing",{"2":{"13":1,"31":1,"36":1,"71":1,"165":1,"167":1,"169":1,"170":3,"177":1,"178":3,"181":2}}],["writer",{"2":{"140":2,"165":2,"167":1}}],["wr
ite",{"2":{"4":1,"11":1,"24":2,"104":1,"105":1,"108":4,"125":1,"130":2,"131":1,"163":3,"165":2,"167":3,"169":1,"170":2,"177":1,"178":2,"179":2,"181":2}}],["walk",{"2":{"99":1}}],["walkthrough",{"0":{"107":1,"108":1},"2":{"92":1}}],["wave",{"2":{"58":1,"181":1}}],["wake",{"2":{"58":1,"181":1}}],["warning",{"2":{"181":2,"183":1}}],["warnings",{"2":{"52":1,"179":1}}],["wars",{"2":{"12":1,"35":1,"41":1,"181":5}}],["waiting",{"2":{"108":1}}],["wait",{"2":{"21":1,"51":1,"52":2,"179":2,"181":2}}],["way",{"2":{"21":1,"24":1,"29":1,"49":1,"52":1,"63":1,"77":1,"79":1,"86":1,"87":1,"105":1,"125":1,"170":1,"178":1,"179":1,"181":3,"183":1}}],["ways",{"2":{"12":1,"41":1,"85":1,"92":1,"181":1}}],["was",{"2":{"7":1,"8":1,"10":1,"24":2,"47":1,"52":2,"58":1,"63":1,"67":1,"73":1,"77":1,"88":1,"106":1,"108":1,"120":1,"122":1,"130":1,"174":2,"179":4,"181":14,"183":5}}],["wanted",{"2":{"52":1,"179":1,"181":2}}],["wants",{"2":{"15":1}}],["want",{"2":{"2":1,"3":1,"7":2,"10":3,"11":1,"19":1,"21":1,"24":1,"51":1,"52":2,"61":2,"63":1,"66":2,"67":3,"74":2,"83":1,"88":2,"89":3,"97":1,"100":1,"103":1,"105":1,"106":3,"107":1,"179":2,"181":37,"183":7}}],["won",{"2":{"27":1,"28":1,"85":1}}],["wonders",{"2":{"8":1}}],["worth",{"0":{"77":1},"2":{"77":1}}],["worst",{"2":{"58":1,"181":1}}],["worry",{"2":{"37":1}}],["words",{"2":{"57":5,"58":13,"61":1,"67":1,"90":2,"114":1,"115":1,"124":1,"125":1,"126":1,"152":1,"161":2,"163":3,"167":1,"181":14,"183":2}}],["word",{"2":{"20":1,"21":4,"51":4,"52":3,"57":1,"67":2,"77":1,"110":1,"112":2,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"130":3,"131":3,"132":1,"134":1,"136":2,"137":1,"138":2,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"149":1,"150":1,"152":1,"153":1,"155":1,"156":1,"157":1,"160":1,"161":2,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"171":1,"172":1,"173":1,"174":1,"176":1,"177":1,"178":1,"179":3,"181":2,"183":9}}],["wordcount",{"2":{"13":1,"24":1,"89":1,"181":3}}],["world",{"2":{"12":1,"13":1,"14":1,"24":4,"52":3,"58":3,"104":1,"105":1,"107":2,"108":1,"110":1,"114":2,"117":1,"118":1,"120":1,"124":1,"125":1,"127":1,"136":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"150":1,"153":1,"157":1,"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"171":1,"172":1,"173":1,"176":1,"177":1,"178":1,"179":1,"181":21}}],["workaround",{"2":{"108":1}}],["workload",{"2":{"61":1}}],["workflow",{"0":{"11":1},"2":{"49":1,"52":1,"140":2,"141":2,"142":2,"181":1}}],["workflows",{"0":{"21":1},"2":{"0":1,"16":1,"21":1,"48":1,"85":1,"106":2,"181":2,"182":1}}],["working",{"0":{"32":1},"1":{"33":1,"34":1,"35":1,"36":1},"2":{"10":1,"22":1,"53":1,"56":1,"65":1,"76":1,"84":1,"85":1,"106":1,"130":1,"131":1,"132":1,"181":2,"183":1}}],["work",{"2":{"7":1,"11":1,"24":1,"32":1,"37":1,"52":1,"63":1,"88":1,"92":1,"93":1,"95":1,"103":1,"106":1,"140":1,"179":1,"183":2}}],["workspace",{"2":{"181":2}}],["works",{"0":{"99":1},"1":{"100":1,"101":1,"102":1,"103":1,"104":1,"105":1,"106":1,"107":1,"108":1},"2":{"0":1,"23":2,"26":1,"58":1,"79":1,"99":1,"107":1,"108":2,"181":7,"183":1}}],["would",{"0":{"8":1,"91":1},"2":{"3":1,"4":1,"7":2,"13":1,"24":3,"39":1,"61":1,"63":1,"65":2,"73":1,"74":2,"86":1,"87":1,"88":2,"89":1,"93":3,"108":1,"124":1,"125":1,"181":3,"183":3}}],["welcome",{"2":{"183":1}}],["well",{"2":{"2":1,"21":1,"29":1,"31":1,"51":1,"52":1,"58":1,"108":4,"121":1,"140":3,"142":1,"171":1,"172":1,"179":2,"181":5,"183":5}}],["weaker",{"2":{"181":1}}],["weaving",{"2":{"174":1}}],["weather",{"
2":{"19":3,"181":39}}],["web",{"2":{"61":2,"117":1,"118":9,"166":1,"183":10}}],["websearch",{"2":{"54":1,"55":4,"180":5,"181":1}}],["website",{"2":{"11":1,"72":1}}],["were",{"2":{"20":1,"37":1,"65":1,"156":1,"181":1,"183":1}}],["weighs",{"2":{"19":1,"181":7}}],["weight",{"2":{"19":2,"181":12}}],["went",{"2":{"10":1,"106":1}}],["we",{"0":{"8":1,"90":1},"2":{"2":1,"3":3,"4":1,"5":3,"6":4,"7":8,"8":1,"10":6,"11":2,"12":1,"13":1,"15":1,"17":1,"18":2,"19":1,"20":1,"21":13,"22":2,"23":5,"24":3,"26":3,"27":1,"30":1,"31":1,"32":2,"36":2,"37":1,"42":1,"45":1,"51":13,"52":24,"58":1,"59":1,"61":5,"63":1,"65":2,"66":2,"67":4,"71":2,"74":5,"87":2,"88":14,"89":2,"90":1,"96":1,"99":1,"102":2,"103":1,"105":4,"106":4,"107":9,"108":23,"179":22,"181":61,"183":24}}],["wise",{"2":{"183":1}}],["wisp",{"2":{"58":1,"181":1}}],["wiki",{"2":{"179":2}}],["wikipedia",{"2":{"179":2,"183":3}}],["width",{"2":{"57":1,"58":3,"181":12,"183":5}}],["wide",{"2":{"30":1,"31":1,"52":1,"152":1,"179":1}}],["wins",{"2":{"179":9}}],["winning",{"2":{"52":2,"179":2}}],["winks",{"2":{"41":2}}],["win",{"2":{"11":1}}],["windows",{"2":{"58":2,"61":1,"79":1,"181":2}}],["window",{"2":{"2":1,"58":2,"79":1,"95":1,"179":1,"181":3,"183":12}}],["will",{"2":{"1":2,"2":2,"4":1,"10":1,"13":2,"18":2,"19":1,"21":2,"24":1,"28":1,"35":1,"41":2,"49":1,"51":1,"52":24,"58":5,"61":1,"63":1,"65":1,"67":21,"69":1,"74":1,"76":1,"77":2,"79":1,"87":2,"89":4,"92":3,"93":1,"95":2,"96":1,"97":1,"102":4,"106":1,"108":1,"112":1,"115":1,"130":1,"136":1,"138":1,"140":1,"142":1,"152":1,"169":1,"170":1,"171":2,"172":2,"177":1,"178":1,"179":36,"181":72,"183":40}}],["without",{"2":{"41":1,"52":1,"58":1,"63":1,"73":1,"92":1,"105":1,"118":1,"120":1,"149":1,"156":1,"174":1,"181":5,"183":3}}],["within",{"2":{"10":1,"58":2,"67":1,"106":1,"120":1,"179":2,"181":14,"183":7}}],["with",{"0":{"1":1,"21":1,"32":1,"33":1,"37":1,"38":1,"43":1,"44":1,"97":1,"98":1},"1":{"2":1,"33":1,"34":2,"35":2,"36":2,"38":1,"39":2,"40":2,"41":2,"42":2,"43":1,"44":1,"45":2,"46":2,"47":2},"2":{"0":4,"1":3,"2":1,"6":2,"7":4,"8":1,"10":12,"11":2,"12":2,"13":4,"14":1,"15":1,"18":1,"19":5,"20":2,"21":5,"22":2,"23":5,"24":11,"26":2,"27":4,"28":2,"29":1,"30":3,"31":4,"32":1,"34":2,"35":1,"36":1,"37":2,"39":2,"40":1,"41":1,"42":4,"45":1,"46":1,"49":5,"51":6,"52":27,"53":1,"56":2,"58":8,"59":3,"60":1,"61":10,"63":3,"64":4,"65":1,"66":2,"67":24,"69":1,"71":1,"72":2,"73":1,"74":2,"77":4,"78":1,"79":1,"84":3,"85":2,"86":1,"87":4,"88":5,"89":2,"91":3,"92":2,"93":7,"95":3,"97":2,"99":1,"100":2,"101":1,"104":3,"105":7,"106":13,"107":2,"108":9,"110":1,"112":2,"114":1,"117":2,"118":2,"121":2,"122":1,"124":1,"125":1,"126":3,"127":2,"128":1,"130":3,"131":2,"132":1,"134":1,"136":3,"138":2,"140":6,"141":4,"142":3,"144":2,"145":1,"147":1,"150":1,"155":2,"156":3,"157":2,"163":5,"164":2,"166":1,"168":1,"170":2,"171":1,"172":1,"173":1,"174":4,"176":1,"178":2,"179":50,"181":167,"183":69}}],["whose",{"2":{"181":2}}],["who",{"2":{"27":1,"55":2,"157":1,"180":2}}],["whole",{"0":{"7":1},"2":{"10":1,"37":1,"87":2,"103":1,"106":1,"170":1,"178":1,"179":1,"181":5}}],["while",{"2":{"130":1,"170":1,"178":1,"179":1,"181":1}}],["whispered",{"2":{"58":4,"181":4}}],["white",{"2":{"21":1,"51":1,"52":1,"58":1,"179":1,"181":6}}],["whichever",{"2":{"46":1}}],["which",{"2":{"0":1,"7":3,"10":3,"17":1,"21":1,"24":1,"28":1,"34":1,"37":2,"42":1,"49":4,"52":4,"58":2,"60":2,"61":2,"63":1,"66":1,"67":5,"74":1,"83":1,"89":1,"90":1,"91":1,"92":1,"100":1,"101":1,"106":3,"107":1,"108":2,"120":2,"140":1,"155":1,"170":1,"178":1,"179":5,"181":50,"183":23}}],["
why",{"0":{"69":1},"1":{"70":1},"2":{"13":1,"19":1,"52":2,"108":1,"130":1,"131":1,"179":1,"181":3}}],["whatever",{"2":{"52":1,"83":1,"179":1,"181":1}}],["what",{"0":{"8":1,"70":1,"92":1},"2":{"2":3,"5":1,"6":1,"7":5,"11":1,"12":2,"13":3,"15":1,"19":1,"21":1,"24":1,"35":1,"40":1,"41":1,"51":1,"52":2,"58":1,"61":4,"67":4,"78":2,"87":3,"88":1,"92":3,"97":4,"98":1,"100":1,"107":3,"120":1,"130":4,"141":1,"150":1,"165":4,"179":2,"181":30,"183":13}}],["whether",{"2":{"7":4,"10":3,"17":1,"52":2,"55":3,"67":4,"106":3,"108":1,"137":2,"179":8,"180":3,"181":33,"183":12}}],["whenever",{"2":{"183":3}}],["when",{"2":{"0":1,"10":4,"11":1,"17":1,"21":2,"22":1,"24":4,"28":1,"49":5,"51":1,"52":2,"61":1,"63":3,"66":1,"67":1,"73":1,"84":1,"85":1,"88":1,"89":1,"100":1,"104":2,"105":2,"106":4,"107":2,"108":3,"150":1,"152":1,"155":2,"156":1,"160":1,"164":1,"166":1,"168":1,"169":3,"173":1,"176":1,"177":1,"179":8,"181":31,"183":5}}],["whereas",{"2":{"7":1,"108":1}}],["where",{"2":{"0":1,"7":1,"10":1,"12":1,"42":1,"49":1,"52":4,"57":1,"58":1,"61":1,"67":3,"73":1,"74":1,"79":1,"88":1,"92":1,"95":1,"105":1,"106":1,"108":1,"124":1,"125":1,"156":2,"163":1,"179":7,"181":26,"183":22}}],["b64",{"2":{"181":4}}],["b",{"2":{"170":2,"178":2,"179":1,"183":3}}],["b>",{"2":{"58":2,"181":12,"183":2}}],["br",{"2":{"183":1}}],["broader",{"2":{"181":2}}],["browser",{"2":{"84":1}}],["brand",{"2":{"108":4}}],["branching",{"2":{"52":1,"179":1}}],["branch",{"2":{"52":1,"179":1}}],["branches",{"2":{"52":4,"58":1,"179":4,"181":1}}],["brackets",{"2":{"61":1,"67":2,"156":1,"183":3}}],["br>",{"2":{"58":1,"181":6,"183":1}}],["breath",{"2":{"131":1,"177":1}}],["break",{"2":{"61":1,"131":1,"132":1}}],["breaks",{"2":{"58":1,"181":1,"183":1}}],["bread",{"2":{"31":2,"108":2}}],["bright",{"2":{"181":2}}],["bring",{"2":{"96":1}}],["brings",{"2":{"12":1}}],["briefly",{"2":{"156":1}}],["brief",{"2":{"24":3,"57":1,"104":1,"105":1,"107":2,"110":1,"117":1,"118":1,"124":1,"130":1,"155":3,"160":1,"161":1,"162":1,"163":7,"164":1,"166":1,"167":1,"168":1,"169":1,"170":1,"173":1,"174":1,"176":1,"177":1,"178":1}}],["bge",{"2":{"30":1}}],["binx",{"2":{"183":4}}],["bin",{"2":{"183":4}}],["binint",{"2":{"183":4}}],["binary",{"2":{"183":18}}],["binarycosinesimilarity",{"2":{"181":1,"183":5}}],["binarybatchembedder",{"2":{"181":1,"183":5}}],["biology",{"2":{"161":1}}],["billing",{"2":{"75":2,"76":1,"95":1}}],["bigger",{"2":{"108":1}}],["big",{"2":{"58":1,"61":1,"65":1,"67":1,"181":1,"183":1}}],["bitmatrix",{"2":{"183":3}}],["bits",{"2":{"181":1,"183":13}}],["bitpackedcosinesimilarity",{"2":{"181":1,"183":5}}],["bitpackedbatchembedder",{"2":{"181":1,"183":5}}],["bit",{"2":{"28":1,"39":1,"66":1,"108":1,"161":1,"183":4}}],["biases",{"2":{"181":1}}],["bias",{"2":{"10":1,"18":1,"106":1,"181":12}}],["blank",{"2":{"149":1}}],["blanksystemuser",{"0":{"149":1},"2":{"92":2,"93":1,"179":1,"181":3}}],["black",{"2":{"21":1,"51":1,"52":1,"58":1,"108":1,"179":1,"181":6,"183":1}}],["blogtitleimagegenerator",{"0":{"152":1}}],["blog",{"2":{"140":1,"152":4,"167":4}}],["blob",{"2":{"58":1,"181":14,"183":1}}],["block",{"2":{"52":15,"67":3,"130":1,"163":1,"179":3,"181":27,"183":7}}],["blocks",{"2":{"52":6,"67":2,"170":2,"178":2,"179":3,"181":25,"183":2}}],["blocking",{"2":{"11":1,"98":1}}],["blue",{"2":{"21":1,"51":1,"52":3,"61":1,"179":3,"181":2,"183":3}}],["bang",{"2":{"97":1}}],["bandit",{"2":{"52":1,"179":2}}],["barplot",{"2":{"67":1,"183":1}}],["bad",{"2":{"52":1,"58":1,"179":1,"181":1}}],["bakllava",{"2":{"42":1,"43":1,"181":3}}],["balance",{"2":{"35":1,"41":2,"75":2}}],["baai",{"2":
{"30":1}}],["backpropagate",{"2":{"179":5,"181":1}}],["backticks",{"2":{"130":1,"131":1,"157":1,"181":1}}],["back",{"2":{"52":4,"87":1,"88":1,"107":1,"179":5,"181":5}}],["backspace",{"2":{"24":1}}],["background",{"2":{"11":1,"22":1,"84":1}}],["batched",{"2":{"183":3}}],["batchembedder",{"2":{"67":1,"181":1,"183":11}}],["batch",{"2":{"14":1,"67":3,"183":21}}],["bash",{"2":{"28":1,"173":1}}],["basename",{"2":{"89":1,"181":1}}],["base",{"2":{"23":2,"26":1,"27":1,"52":1,"59":1,"156":1,"181":11,"183":1}}],["base64decode",{"2":{"181":1}}],["base64",{"2":{"10":1,"104":1,"106":1,"181":2}}],["based",{"2":{"7":2,"52":3,"61":2,"63":1,"66":6,"67":8,"110":2,"112":4,"117":1,"118":1,"120":1,"121":1,"122":1,"126":1,"130":2,"131":2,"132":1,"137":1,"138":1,"141":1,"152":1,"161":2,"171":1,"172":1,"179":10,"181":21,"183":27}}],["basic",{"0":{"109":1},"1":{"110":1},"2":{"10":1,"55":3,"100":1,"104":1,"106":1,"180":3}}],["bold",{"2":{"183":4}}],["body",{"2":{"163":2,"181":15}}],["bodies",{"2":{"58":1,"181":1}}],["border",{"2":{"58":4,"181":24,"183":4}}],["boundary",{"2":{"183":1}}],["boundaries",{"2":{"181":1,"183":3}}],["bound",{"2":{"52":7,"179":9}}],["bounds",{"2":{"52":1,"120":1,"179":1}}],["bool=isnothing",{"2":{"181":1}}],["bool=true",{"2":{"52":4,"67":1,"179":1,"181":5,"183":1}}],["bool=false",{"2":{"52":12,"179":6,"181":36,"183":1}}],["boolean",{"2":{"52":2,"88":1,"179":4,"181":20,"183":13}}],["bool",{"2":{"10":2,"21":1,"51":1,"52":14,"55":3,"67":15,"88":3,"106":2,"108":2,"179":16,"180":3,"181":103,"183":63}}],["both",{"2":{"7":4,"21":2,"41":1,"51":2,"52":2,"67":1,"131":1,"179":2,"181":7,"183":9}}],["bm25similarity",{"2":{"67":1,"181":1,"183":6}}],["bm25",{"2":{"8":1,"67":2,"183":15}}],["buffer",{"2":{"181":1}}],["business",{"2":{"163":1}}],["bullets",{"2":{"183":1}}],["bullet",{"2":{"130":5,"155":7,"156":5,"163":3}}],["bundle",{"2":{"91":1}}],["buy",{"2":{"77":1,"95":1}}],["bug",{"2":{"73":2}}],["but",{"2":{"5":1,"6":2,"7":2,"10":3,"11":2,"12":2,"13":1,"18":1,"19":2,"23":1,"24":2,"28":2,"29":1,"30":1,"31":1,"41":2,"55":1,"58":3,"61":1,"67":2,"73":1,"74":1,"79":1,"87":1,"92":1,"98":1,"99":1,"104":1,"106":3,"108":2,"142":1,"156":1,"176":1,"179":1,"180":1,"181":27,"183":11}}],["built",{"2":{"4":1,"60":1,"63":1,"114":1,"179":1,"181":2,"183":1}}],["builds",{"2":{"67":1,"181":7,"183":4}}],["build",{"2":{"1":1,"2":6,"4":1,"8":2,"10":2,"12":1,"49":1,"59":1,"60":4,"61":4,"63":3,"64":3,"66":4,"67":12,"108":1,"181":19,"183":40}}],["building",{"0":{"1":1},"1":{"2":1},"2":{"21":1,"48":1,"52":1,"59":1,"60":1,"61":1,"67":2,"166":1,"179":2,"182":1,"183":6}}],["berlin",{"2":{"183":1}}],["bearer",{"2":{"181":3}}],["belong",{"2":{"156":1,"157":1,"181":1}}],["below",{"2":{"0":1,"22":1,"52":2,"69":1,"73":1,"74":1,"83":1,"108":1,"115":1,"117":1,"125":1,"163":1,"169":1,"177":1,"179":2,"181":8,"183":1}}],["believe",{"2":{"130":1,"131":1,"132":1}}],["begin",{"2":{"130":1,"140":1,"170":3,"178":3,"181":1}}],["beginning",{"2":{"87":1,"120":1,"181":3}}],["beginners",{"2":{"61":1}}],["begins",{"2":{"66":1,"174":1}}],["beneath",{"2":{"58":2,"181":2}}],["benefits",{"2":{"108":1}}],["benefit",{"2":{"52":1,"179":1}}],["behave",{"2":{"104":1}}],["behavior",{"2":{"49":1,"58":2,"63":2,"78":2,"86":1,"93":1,"108":1,"181":2}}],["behavioural",{"2":{"157":1,"161":1}}],["behaviours",{"2":{"52":1,"179":1}}],["behaviour",{"2":{"21":1,"51":1,"52":1,"108":1,"179":1}}],["behind",{"2":{"58":1,"174":1,"181":1}}],["besides",{"2":{"181":2}}],["bespoke",{"2":{"52":1,"181":2}}],["best",{"2":{"5":1,"21":1,"23":2,"26":2,"30":1,"31":1,"34":1,"49":1,"51":1,"52"
:3,"61":7,"67":1,"89":2,"136":2,"138":2,"142":1,"157":1,"158":1,"159":1,"163":1,"179":12,"181":5,"183":7}}],["before",{"2":{"52":4,"57":1,"58":1,"74":1,"79":1,"87":1,"95":2,"130":1,"144":2,"155":1,"156":1,"179":1,"181":11,"183":1}}],["been",{"2":{"23":1,"26":1,"42":1,"52":3,"92":1,"105":1,"131":1,"141":1,"142":1,"179":4,"181":5}}],["becoming",{"2":{"12":1}}],["become",{"2":{"12":3,"24":1,"35":1,"41":2,"181":7}}],["because",{"2":{"7":1,"11":1,"21":1,"23":1,"26":1,"28":2,"36":1,"51":1,"52":1,"67":1,"106":1,"108":3,"179":3,"181":5,"183":2}}],["beta",{"2":{"179":3,"181":5}}],["betwee",{"2":{"52":1,"179":1}}],["between",{"2":{"6":1,"7":2,"10":1,"16":2,"21":1,"51":1,"52":5,"57":4,"58":5,"61":2,"100":1,"104":1,"121":1,"140":2,"141":2,"161":1,"179":6,"181":15,"183":11}}],["better",{"2":{"8":1,"49":1,"55":1,"58":2,"67":1,"79":1,"108":3,"117":3,"118":3,"140":1,"142":2,"180":1,"181":4,"183":5}}],["be",{"0":{"73":2},"2":{"1":1,"2":1,"7":2,"10":5,"12":1,"15":4,"16":1,"17":1,"18":1,"21":3,"22":1,"23":1,"24":1,"26":1,"28":1,"35":1,"41":1,"49":2,"51":3,"52":23,"55":1,"57":1,"58":7,"59":1,"60":1,"61":3,"63":2,"65":3,"66":1,"67":39,"69":1,"74":1,"75":1,"76":1,"78":1,"82":1,"84":1,"85":1,"88":2,"89":3,"90":1,"92":2,"93":2,"96":1,"97":2,"99":1,"100":1,"101":1,"102":1,"103":1,"106":4,"108":2,"110":1,"112":3,"115":1,"117":1,"118":1,"120":1,"121":1,"124":1,"125":1,"126":1,"130":6,"131":4,"136":1,"138":2,"140":3,"141":1,"142":1,"150":1,"152":3,"155":5,"156":4,"157":8,"161":1,"163":2,"167":1,"170":4,"171":2,"172":2,"174":3,"178":4,"179":33,"180":1,"181":211,"182":2,"183":90}}],["being",{"2":{"0":1,"5":1,"7":1,"41":1,"52":1,"155":1,"179":1,"181":7}}],["by",{"2":{"0":3,"6":2,"10":2,"13":1,"21":2,"23":1,"24":6,"27":1,"49":3,"52":5,"57":1,"58":5,"59":1,"60":2,"61":4,"63":2,"65":1,"67":6,"71":1,"72":1,"73":1,"90":2,"92":2,"93":1,"106":1,"107":2,"108":2,"112":2,"114":1,"120":1,"121":1,"124":1,"125":1,"128":1,"130":4,"131":3,"132":1,"140":5,"142":2,"144":1,"155":1,"156":1,"158":1,"159":2,"165":1,"167":1,"169":1,"174":1,"177":1,"179":12,"181":62,"183":41}}],["europe",{"2":{"183":4}}],["eyes",{"2":{"181":1}}],["educator",{"2":{"167":1}}],["educational",{"2":{"107":1,"167":1}}],["editor",{"2":{"140":4}}],["editing",{"2":{"11":1}}],["e2e",{"2":{"64":1}}],["echoes",{"2":{"181":5}}],["echoing",{"2":{"58":1,"181":1}}],["ecosystem",{"2":{"124":1}}],["econometrics",{"2":{"61":1}}],["et",{"2":{"183":3}}],["ethos",{"2":{"83":1}}],["ethereal",{"2":{"58":1,"181":1}}],["etc",{"2":{"0":1,"2":1,"15":1,"21":1,"24":1,"51":1,"52":2,"67":1,"69":1,"86":1,"92":1,"104":1,"106":1,"114":2,"117":1,"153":1,"162":1,"179":2,"181":12,"183":13}}],["equally",{"2":{"181":1}}],["equality",{"2":{"88":1}}],["equal",{"2":{"52":1,"55":1,"58":2,"67":1,"179":1,"180":1,"181":2,"183":3}}],["equivalent",{"2":{"2":2,"13":1,"61":1,"106":1,"181":1}}],["essence",{"2":{"156":1}}],["essential",{"2":{"121":1,"179":2}}],["estimated",{"2":{"98":1}}],["estimate",{"2":{"36":1,"181":3}}],["especially",{"2":{"22":1,"52":1,"57":1,"84":1,"85":1,"107":1,"157":1,"179":2}}],["elapsed",{"2":{"181":22,"183":1}}],["elaboration",{"2":{"120":1}}],["elicit",{"2":{"150":1}}],["else`",{"2":{"171":1,"172":1}}],["elseif",{"2":{"171":1,"172":1}}],["else",{"2":{"18":2,"41":1,"52":2,"88":2,"117":2,"118":2,"161":1,"179":2,"181":5}}],["elementwise",{"2":{"183":1}}],["element",{"2":{"13":2,"16":1,"19":1,"22":1,"24":3,"47":1,"87":1,"89":2,"92":3,"107":2,"181":15,"183":1}}],["evolving",{"2":{"179":1}}],["evolved",{"2":{"156":1}}],["ever",{"2":{"181":2}}],["everyone",{"2":{"167":1}}],["every",{"2":{"
63":1,"74":1,"77":1,"79":1,"85":1,"103":1,"153":1,"179":1,"183":1}}],["everything",{"2":{"18":2,"88":1,"93":1,"181":7}}],["eventmessage",{"2":{"181":1}}],["event",{"2":{"181":2}}],["even",{"2":{"19":2,"29":1,"30":1,"51":1,"52":2,"55":1,"58":1,"65":1,"155":1,"179":1,"180":1,"181":5,"183":1}}],["eval=false",{"2":{"52":2,"181":2}}],["evalutes",{"2":{"179":1}}],["evaluted",{"2":{"52":1,"179":1}}],["evaluator",{"2":{"134":2,"179":3}}],["evaluating",{"2":{"17":1,"52":2,"120":1,"137":1,"140":1,"181":2,"183":1}}],["evaluation",{"0":{"119":1},"1":{"120":1,"121":1,"122":1},"2":{"4":1,"6":2,"8":1,"10":1,"49":2,"52":10,"60":1,"106":1,"121":1,"122":1,"179":10,"181":10,"183":9}}],["evaluations",{"0":{"3":1},"1":{"4":1,"5":1,"6":1,"7":1},"2":{"52":3,"67":2,"179":5,"183":4}}],["evaluated",{"2":{"52":8,"130":1,"131":1,"179":3,"181":9}}],["evaluates",{"2":{"21":1,"51":1,"52":3,"179":2,"181":3,"183":2}}],["evaluate",{"0":{"6":1,"7":1},"2":{"3":1,"6":2,"7":2,"21":1,"51":1,"52":11,"121":2,"141":1,"142":1,"179":22,"181":4}}],["eval",{"2":{"7":2,"24":1,"52":9,"181":13,"183":1}}],["evals",{"2":{"4":6,"5":1,"6":3,"7":7,"60":1,"67":4,"181":3,"183":18}}],["effectiveness",{"2":{"140":1,"142":1}}],["effective",{"2":{"125":1,"127":1,"150":1}}],["effectively",{"2":{"12":1,"57":1,"65":2,"86":1,"103":1,"107":2,"179":1,"181":7}}],["efficiently",{"2":{"61":1,"142":1,"179":1}}],["efficient",{"2":{"52":2,"56":1,"61":1,"163":2,"179":2,"183":4}}],["effort",{"2":{"13":1,"59":1}}],["emails",{"2":{"163":3}}],["email",{"2":{"163":10}}],["emphasize",{"2":{"140":1,"171":1,"172":1}}],["empty",{"0":{"73":2},"2":{"52":2,"55":2,"58":6,"67":2,"179":1,"180":2,"181":40,"183":4}}],["emb",{"2":{"61":1,"183":21}}],["embedder",{"2":{"67":17,"183":29}}],["embedded",{"2":{"46":1,"67":1,"183":6}}],["embedding",{"0":{"45":1,"46":1},"2":{"2":1,"8":2,"16":1,"30":1,"31":1,"46":2,"66":1,"67":8,"181":9,"183":54}}],["embeddings",{"0":{"16":1,"44":1},"1":{"45":1,"46":1,"47":1},"2":{"2":1,"10":2,"16":3,"47":1,"64":2,"66":4,"67":8,"104":1,"106":2,"181":20,"183":62}}],["embeds",{"2":{"2":1,"67":1,"183":4}}],["embed",{"2":{"2":3,"16":3,"22":3,"23":1,"26":1,"30":1,"31":2,"45":2,"46":4,"47":1,"181":4}}],["emotions",{"2":{"58":2,"181":2}}],["emotional",{"2":{"12":1}}],["either",{"2":{"10":1,"19":1,"23":1,"26":1,"42":1,"55":1,"63":1,"74":1,"88":1,"106":1,"179":1,"180":1,"181":7}}],["e",{"2":{"10":1,"19":1,"52":4,"106":1,"108":2,"112":1,"136":1,"138":1,"141":1,"179":2,"181":13,"183":1}}],["error=true",{"2":{"181":3}}],["errorexception",{"2":{"52":1,"179":1}}],["errors",{"2":{"19":1,"21":1,"22":1,"49":1,"51":1,"52":7,"67":1,"75":1,"108":1,"130":2,"132":1,"142":3,"179":10,"181":3,"183":5}}],["error",{"0":{"73":2,"74":1,"75":1},"2":{"7":2,"49":2,"52":13,"73":2,"74":3,"75":4,"88":1,"108":4,"130":2,"131":1,"179":14,"181":33}}],["earlier",{"2":{"73":1}}],["eating",{"2":{"31":1,"108":1}}],["easiest",{"2":{"83":1,"86":1,"97":1}}],["easier",{"2":{"0":1,"7":1,"29":1,"56":1,"58":1,"67":1,"86":1,"106":1,"155":1,"181":1,"183":2}}],["easily",{"2":{"15":1,"60":1,"67":1,"88":1,"91":1,"97":1,"181":2,"183":2}}],["easy",{"2":{"6":1,"47":1,"79":1,"82":1,"84":1,"92":1,"95":1,"108":1,"149":1,"156":1,"167":1,"181":5}}],["each",{"2":{"2":2,"4":1,"7":4,"10":2,"12":1,"15":1,"21":2,"51":2,"52":3,"58":12,"61":1,"63":2,"65":1,"66":2,"67":21,"77":1,"106":1,"112":1,"114":1,"115":1,"121":1,"126":1,"130":1,"141":1,"142":1,"155":3,"156":6,"157":4,"163":1,"167":1,"170":2,"174":2,"178":2,"179":6,"181":31,"183":59}}],["exit",{"2":{"24":1}}],["existing",{"2":{"24":1,"63":1,"67":2,"93":1,"117":1,"118":1
,"181":5,"183":3}}],["existent",{"2":{"21":1,"51":1,"52":1,"179":1}}],["exists",{"2":{"7":2,"183":1}}],["exist",{"2":{"6":1,"7":6,"92":4}}],["exclamation",{"2":{"183":1}}],["exclude",{"2":{"55":2,"156":1,"180":2,"181":3,"183":3}}],["exciting",{"2":{"23":1,"27":1}}],["excessive",{"2":{"61":1}}],["exceed",{"2":{"74":1,"181":2}}],["exceeds",{"2":{"58":1,"181":1}}],["exceeding",{"2":{"58":1,"67":1,"181":1,"183":7}}],["exceeded",{"0":{"74":1},"2":{"21":1,"51":1,"74":2,"75":1}}],["exception",{"2":{"52":4,"63":1,"108":1,"179":2,"181":3}}],["except",{"2":{"21":1,"179":1,"181":1}}],["external",{"2":{"120":1,"181":1}}],["extension",{"2":{"181":2,"183":1}}],["extensions",{"2":{"49":1,"59":1}}],["extensively",{"2":{"165":1,"167":1}}],["extensive",{"2":{"164":1,"171":1}}],["extensible",{"2":{"52":1,"60":1,"63":1,"179":1}}],["extended",{"2":{"52":1,"179":1,"181":1}}],["extend",{"2":{"15":1,"23":1,"59":1}}],["extremely",{"2":{"75":1,"121":2,"131":1,"140":1,"165":1,"167":1}}],["extras",{"2":{"67":3,"181":2,"183":8}}],["extra",{"2":{"28":1,"58":2,"67":1,"88":1,"92":1,"97":1,"156":1,"181":5,"183":1}}],["extractdata",{"0":{"147":1}}],["extractdataxml",{"0":{"145":1}}],["extractdatacotxml",{"0":{"144":1},"2":{"181":1}}],["extracted",{"2":{"114":3,"181":21}}],["extractor",{"2":{"108":1}}],["extraction",{"0":{"19":1,"146":1},"1":{"147":1},"2":{"31":1,"67":4,"108":1,"114":2,"144":2,"145":2,"147":2,"181":17,"183":6}}],["extracting",{"2":{"2":1,"19":3,"67":1,"181":2,"183":1}}],["extracts",{"2":{"2":1,"49":1,"52":1,"66":2,"67":2,"114":1,"115":1,"179":1,"181":4,"183":7}}],["extract",{"2":{"2":2,"8":1,"10":2,"19":3,"31":2,"52":5,"93":2,"106":2,"107":1,"108":10,"114":4,"144":1,"145":1,"147":1,"179":5,"181":74,"183":9}}],["executing",{"2":{"183":2}}],["execution",{"0":{"14":1},"2":{"10":1,"49":2,"52":11,"106":1,"131":1,"132":1,"179":6,"181":14}}],["executor",{"2":{"52":1,"181":1}}],["execute",{"2":{"52":3,"106":1,"179":1,"181":11}}],["executed",{"2":{"49":1,"52":4,"131":1,"179":2,"181":5}}],["executes",{"2":{"10":1,"49":2,"52":2,"106":1,"179":4,"181":3}}],["examine",{"2":{"142":1}}],["example>",{"2":{"178":2}}],["examples",{"0":{"9":1,"50":1,"61":1},"1":{"10":1,"11":1,"12":1,"13":1,"14":1,"15":1,"16":1,"17":1,"18":1,"19":1,"20":1,"21":1,"22":1,"23":1,"51":1},"2":{"2":5,"4":2,"5":1,"6":1,"13":2,"21":1,"24":1,"32":1,"37":1,"52":2,"58":3,"61":2,"67":9,"70":1,"86":1,"91":1,"92":1,"98":2,"114":1,"157":2,"164":1,"166":1,"169":1,"173":1,"177":1,"179":1,"181":24,"183":20}}],["example",{"0":{"107":1,"108":1},"2":{"0":1,"1":1,"2":1,"5":1,"7":1,"10":1,"13":1,"15":1,"17":1,"18":1,"21":1,"24":3,"31":1,"37":1,"42":1,"49":1,"52":6,"55":1,"58":3,"61":1,"65":1,"67":5,"77":2,"78":3,"86":1,"88":3,"90":1,"91":1,"92":2,"93":1,"99":1,"102":1,"105":1,"106":2,"108":4,"114":1,"120":1,"157":4,"161":1,"170":1,"179":10,"180":1,"181":52,"183":21}}],["exact",{"2":{"67":2,"181":7,"183":10}}],["exactly",{"2":{"10":2,"21":1,"24":1,"49":1,"52":1,"67":1,"106":2,"179":5,"181":1,"183":2}}],["expr",{"2":{"181":4}}],["expression",{"2":{"52":6,"140":1,"181":8}}],["export",{"2":{"79":1,"95":1}}],["exported",{"2":{"24":1,"37":1,"52":1,"179":1}}],["expanded",{"2":{"181":2}}],["expanding",{"2":{"126":1}}],["expand",{"2":{"92":1,"107":1,"126":1,"179":8,"181":1}}],["expands",{"2":{"17":1,"92":1,"179":2}}],["expanse",{"2":{"58":1,"181":1}}],["expect",{"2":{"181":1}}],["expected",{"2":{"181":2}}],["expectations",{"2":{"142":1}}],["expects",{"2":{"136":1,"138":1,"170":1,"178":1,"181":1}}],["expertise",{"2":{"164":1}}],["expert",{"2":{"125":1,"127":1,"142":4,"144":1,"1
45":1,"147":1,"163":1,"164":2,"167":1,"170":1,"173":1,"178":1,"181":1}}],["experiencing",{"2":{"73":1}}],["experience",{"2":{"12":1}}],["experiences",{"2":{"12":1}}],["experiment",{"2":{"52":1,"91":1,"179":1,"181":1}}],["experimental",{"0":{"21":1,"182":1},"2":{"1":3,"10":4,"21":2,"48":3,"52":6,"53":2,"55":1,"57":1,"59":3,"63":1,"66":1,"67":6,"88":1,"106":2,"179":70,"180":4,"181":177,"182":5,"183":281}}],["expensive",{"2":{"3":1,"52":3,"179":6}}],["exploits",{"2":{"181":2}}],["exploitation",{"2":{"179":1}}],["exploration",{"2":{"179":1}}],["explorer",{"2":{"73":1}}],["explore",{"0":{"5":1},"2":{"5":1,"8":1,"41":1,"52":2,"61":1,"88":1,"179":2,"181":1}}],["explanatory",{"2":{"181":1}}],["explanations",{"2":{"0":1,"19":1,"141":1,"142":1}}],["explaining",{"2":{"170":1,"178":1}}],["explain",{"2":{"112":1,"130":1,"131":1,"144":1,"174":1,"183":1}}],["explains",{"2":{"99":1,"174":1}}],["explicit",{"2":{"67":2,"144":1,"145":1,"147":1,"181":8,"183":2}}],["explicitly",{"2":{"2":1,"13":1,"21":1,"23":1,"26":1,"42":1,"52":3,"85":1,"89":1,"124":1,"163":2,"171":1,"172":1,"179":3,"181":5,"182":1}}],["enforces",{"2":{"181":2}}],["enforce",{"2":{"181":14}}],["encapsulates",{"2":{"179":1}}],["encapsulated",{"2":{"179":1}}],["encoding",{"2":{"181":1}}],["encode",{"2":{"108":2,"181":7}}],["encoded",{"2":{"10":1,"104":1,"106":1,"181":3}}],["encouraging",{"2":{"163":1}}],["enclosed",{"2":{"131":1}}],["enclose",{"2":{"130":1}}],["enhance",{"2":{"127":1,"140":2,"150":1,"183":2}}],["enhancing",{"2":{"53":1,"140":1,"156":1,"181":1}}],["enabling",{"2":{"66":1,"179":1,"181":1}}],["enable",{"2":{"93":1,"108":1,"181":2,"183":1}}],["enables",{"2":{"10":1,"49":1,"67":2,"106":1,"181":4,"183":8}}],["enabled",{"2":{"10":1,"67":1,"106":1,"183":2}}],["enigmatic",{"2":{"58":2,"181":2}}],["enough",{"2":{"58":2,"181":3}}],["ensuring",{"2":{"58":1,"150":1,"153":1,"181":1}}],["ensure",{"2":{"0":1,"52":2,"78":1,"88":3,"108":1,"127":2,"130":2,"142":2,"144":1,"145":1,"147":1,"155":1,"156":1,"163":1,"179":2,"181":4,"183":4}}],["ensures",{"2":{"0":1,"58":1,"181":1}}],["enjoy",{"2":{"41":1}}],["en",{"2":{"30":1,"179":2}}],["engaging",{"2":{"174":1}}],["engagement",{"2":{"140":3}}],["engage",{"2":{"30":1}}],["english",{"2":{"67":1,"183":5}}],["engineer",{"2":{"150":1}}],["engineering",{"2":{"17":1,"150":1}}],["engine",{"2":{"54":1,"108":1,"114":2,"126":1,"153":1}}],["enumerates",{"2":{"183":4}}],["enumerated",{"2":{"181":2}}],["enumerate",{"2":{"67":1,"183":1}}],["enum",{"2":{"19":1,"88":2}}],["entire",{"2":{"181":15}}],["entity",{"2":{"126":1}}],["entities",{"2":{"12":1,"19":1,"58":1,"114":1,"126":1,"181":3}}],["entry",{"2":{"63":1,"67":1,"149":1,"181":1,"183":1}}],["entries",{"2":{"6":1,"108":2,"181":1}}],["enter",{"2":{"24":2}}],["end=25",{"2":{"183":1}}],["ended",{"2":{"181":1,"183":1}}],["end|>",{"2":{"181":2}}],["end>",{"2":{"181":1}}],["ending",{"2":{"181":1}}],["end`",{"2":{"130":1,"170":2,"171":1,"172":1,"178":2}}],["end",{"2":{"7":1,"19":3,"21":1,"31":1,"34":1,"46":1,"51":1,"52":11,"58":1,"61":2,"63":1,"67":4,"74":1,"88":6,"91":2,"108":5,"170":1,"178":1,"179":15,"181":26,"183":12}}],["endpoints",{"2":{"0":1,"181":1}}],["endpoint",{"2":{"0":2,"67":2,"138":5,"180":1,"181":9,"183":6}}],["environments",{"2":{"52":1,"179":1}}],["environment",{"0":{"79":1},"2":{"32":1,"54":1,"61":1,"73":1,"78":1,"79":2,"93":1,"95":2,"181":11}}],["env",{"2":{"0":1,"23":1,"26":1,"29":2,"30":1,"31":1,"73":3,"78":1,"79":1,"95":2,"181":7,"183":1}}],["eg",{"2":{"0":1,"2":1,"6":1,"7":2,"8":1,"10":9,"12":2,"13":1,"15":2,"21":6,"23":2,"24":1,"25":1,"27":2,"28":1
,"37":1,"42":1,"51":5,"52":10,"57":4,"58":3,"60":1,"61":3,"63":6,"64":1,"65":1,"66":3,"67":15,"69":3,"74":3,"75":1,"77":2,"78":2,"79":2,"84":1,"85":2,"86":4,"88":2,"89":1,"97":2,"98":1,"100":3,"101":3,"102":1,"103":1,"104":3,"105":2,"106":10,"108":3,"114":1,"163":2,"170":1,"178":1,"179":15,"181":63,"182":2,"183":40}}],["hd",{"2":{"181":3}}],["hh",{"2":{"155":2,"156":3}}],["huggingface",{"2":{"183":6}}],["hundred",{"2":{"181":2}}],["hundredth",{"2":{"77":1}}],["hundreds",{"2":{"75":2}}],["humans",{"2":{"181":1}}],["human",{"2":{"17":1,"155":1,"156":1,"181":4}}],["htmlstyler",{"2":{"181":1,"183":13}}],["html",{"2":{"61":2,"181":1,"183":15}}],["https",{"2":{"20":1,"37":1,"58":2,"75":1,"108":2,"112":1,"179":2,"180":1,"181":18,"183":7}}],["http",{"2":{"10":2,"23":1,"28":1,"65":3,"67":2,"74":1,"106":2,"180":1,"181":60,"183":5}}],["href=",{"2":{"58":1,"181":6,"183":1}}],["hcat",{"2":{"46":1,"181":3,"183":4}}],["hit",{"2":{"181":3,"183":2}}],["his",{"2":{"89":2,"181":2}}],["history",{"2":{"52":1,"88":1,"103":1,"179":5,"181":110,"183":1}}],["hint",{"2":{"52":2,"179":2}}],["hints",{"2":{"52":1,"179":1}}],["hi",{"2":{"21":2,"22":1,"23":3,"26":2,"27":1,"28":1,"29":3,"30":1,"31":1,"34":3,"37":1,"39":1,"40":1,"42":2,"51":2,"52":2,"87":3,"89":3,"92":6,"93":3,"98":1,"163":1,"179":12,"181":24}}],["highly",{"2":{"121":2,"127":1,"138":1}}],["highlevel",{"2":{"52":2,"179":2}}],["highlighted",{"2":{"181":3}}],["highlights",{"0":{"49":1,"54":1,"57":1,"60":1}}],["highlighting",{"2":{"20":1,"61":1,"141":1,"142":1,"181":2,"183":1}}],["highlight",{"2":{"10":1,"60":1,"67":1,"130":1,"141":1,"155":1,"183":1}}],["higher",{"2":{"17":1,"52":1,"58":1,"63":1,"67":2,"179":3,"181":8,"183":5}}],["highest",{"2":{"7":1,"61":1,"183":1}}],["high",{"2":{"3":1,"24":3,"61":2,"63":2,"67":2,"105":1,"107":2,"160":1,"164":1,"166":1,"168":1,"173":1,"176":1,"181":5,"183":6}}],["hmm",{"2":{"12":1,"41":1,"181":1}}],["hyderephraser",{"2":{"181":1,"183":3}}],["hyde",{"2":{"66":1,"124":1,"125":1,"183":3}}],["hypothetical",{"2":{"8":1,"124":3,"125":2,"183":2}}],["hybrid",{"2":{"8":1,"66":1,"67":2,"183":4}}],["her",{"2":{"114":1}}],["here>",{"2":{"170":1,"178":1}}],["here",{"2":{"23":1,"24":1,"26":1,"29":1,"30":1,"31":2,"32":1,"34":1,"41":1,"42":1,"52":1,"58":1,"63":1,"84":1,"107":1,"179":1,"181":26}}],["hence",{"2":{"100":1,"181":3}}],["heals",{"2":{"183":1}}],["healing",{"2":{"88":1,"106":1}}],["heavy",{"2":{"108":1}}],["heavily",{"2":{"86":1,"114":1}}],["heavens",{"2":{"58":1,"181":1}}],["hear",{"2":{"58":2,"181":2}}],["hearty",{"2":{"89":2,"181":2}}],["heart",{"2":{"35":1}}],["header",{"2":{"181":3}}],["headers",{"2":{"2":1,"74":1,"86":1,"181":12}}],["headings",{"2":{"167":1}}],["headlines",{"2":{"163":1}}],["head",{"2":{"41":1}}],["he",{"2":{"19":1,"181":7}}],["height",{"2":{"19":2,"181":12}}],["held",{"2":{"67":1,"183":1}}],["hello",{"2":{"12":1,"14":1,"22":1,"23":1,"26":1,"30":1,"31":1,"39":1,"40":1,"42":1,"52":3,"58":3,"87":1,"179":1,"181":30}}],["helping",{"2":{"183":1}}],["helpful",{"2":{"19":1,"23":1,"26":1,"34":1,"52":4,"53":1,"87":1,"92":1,"99":1,"121":3,"122":1,"130":1,"141":2,"160":1,"164":1,"166":1,"173":1,"179":2,"181":8}}],["helpfulness",{"2":{"6":1,"121":1}}],["helps",{"2":{"19":1,"181":1}}],["help",{"2":{"12":1,"19":1,"21":1,"23":3,"26":3,"30":1,"31":1,"34":1,"35":1,"39":1,"40":1,"41":2,"42":1,"51":1,"67":1,"82":1,"108":1,"155":1,"181":7,"183":1}}],["helper",{"2":{"10":1,"24":1,"37":1,"106":1,"181":2}}],["horizontal",{"2":{"183":1}}],["holding",{"2":{"183":3}}],["hold",{"2":{"179":1}}],["holds",{"2":{"2":1,"67":1,"108":1,"183"
:3}}],["hope",{"2":{"59":1,"60":1,"88":1,"107":1,"117":1,"118":1}}],["honor",{"2":{"52":1,"179":1}}],["hosting",{"2":{"29":1,"84":1}}],["host",{"2":{"29":2,"181":11}}],["hosted",{"2":{"25":1,"67":4,"70":1,"78":1,"100":1,"101":1,"108":2,"181":2,"183":4}}],["hood",{"2":{"2":1,"18":1,"23":1,"26":1,"28":1,"49":1,"61":1,"99":1,"107":2,"108":1,"181":2}}],["however",{"2":{"3":2,"15":1,"24":1,"75":1,"181":1,"183":1}}],["how",{"0":{"77":1,"78":1,"87":1,"88":1,"89":1,"91":1,"92":1,"99":1},"1":{"100":1,"101":1,"102":1,"103":1,"104":1,"105":1,"106":1,"107":1,"108":1},"2":{"0":2,"7":1,"8":1,"10":1,"11":1,"13":4,"21":1,"22":1,"23":2,"24":4,"26":2,"30":1,"32":1,"36":1,"37":1,"51":1,"52":4,"58":2,"61":7,"63":1,"67":2,"71":4,"74":1,"85":1,"87":2,"88":3,"99":2,"103":1,"104":1,"107":1,"108":3,"121":4,"122":1,"140":3,"142":2,"170":1,"178":1,"179":5,"181":32,"183":6}}],["happy",{"2":{"181":1}}],["happened",{"2":{"131":1}}],["happens",{"2":{"100":1,"155":1}}],["happening",{"2":{"61":1}}],["haiku",{"2":{"181":4}}],["hamming",{"2":{"181":1,"183":9}}],["half",{"2":{"67":1,"91":1,"183":1}}],["hallucination",{"2":{"17":1}}],["hackable",{"2":{"63":1}}],["had",{"2":{"24":1,"181":1}}],["harder",{"2":{"67":2,"183":2}}],["hard",{"2":{"13":1,"19":1,"41":1,"76":2,"97":1,"179":1}}],["handling",{"2":{"52":2,"88":1,"179":1,"181":2}}],["handles",{"2":{"88":1,"181":1,"183":1}}],["handlebars",{"2":{"98":1}}],["handlebar",{"2":{"12":1,"181":1}}],["handle",{"2":{"7":1,"61":1,"67":2,"88":2,"102":1,"108":1,"181":10,"183":5}}],["handcraft",{"2":{"3":1}}],["hash",{"2":{"181":2,"183":1}}],["hashed",{"2":{"67":5,"181":1,"183":9}}],["hasn",{"2":{"52":2,"179":2}}],["has",{"2":{"0":2,"15":1,"21":1,"28":1,"42":1,"52":4,"65":1,"74":2,"88":1,"117":1,"118":1,"130":1,"131":1,"142":1,"155":1,"179":2,"181":23,"183":3}}],["having",{"0":{"73":2},"2":{"0":1,"58":1,"105":1,"181":1}}],["have",{"0":{"87":1,"88":1,"90":1},"2":{"0":1,"5":1,"6":1,"7":6,"10":2,"11":1,"12":5,"13":1,"15":1,"19":1,"21":2,"22":1,"23":5,"24":3,"26":4,"27":1,"28":2,"30":1,"31":2,"34":1,"35":2,"36":1,"37":1,"41":4,"42":1,"49":2,"51":1,"52":1,"57":1,"60":1,"61":1,"63":3,"67":4,"74":2,"75":1,"77":1,"78":1,"85":1,"88":1,"89":2,"92":2,"95":1,"96":2,"102":2,"103":1,"105":3,"106":3,"108":4,"114":1,"115":1,"117":1,"118":1,"120":1,"125":1,"141":1,"152":1,"162":1,"165":1,"167":2,"171":1,"172":1,"179":3,"181":32,"183":17}}],["omit",{"2":{"157":1}}],["o1",{"2":{"78":7}}],["observability",{"2":{"181":2}}],["observe",{"2":{"63":1}}],["obj",{"2":{"108":3}}],["objective",{"2":{"125":1,"127":1}}],["objects",{"2":{"89":1,"100":1,"149":1,"181":30,"183":2}}],["object>",{"2":{"88":1}}],["object",{"2":{"6":1,"12":4,"21":2,"35":1,"51":2,"52":10,"66":1,"67":6,"88":1,"93":1,"97":1,"100":1,"101":1,"107":1,"108":12,"179":15,"181":75,"183":10}}],["obtained",{"2":{"181":1}}],["obtain",{"2":{"52":1,"67":2,"107":1,"179":1,"183":2}}],["ocean",{"2":{"58":4,"181":4}}],["occur",{"2":{"156":1}}],["occurrences",{"2":{"58":1,"181":1}}],["occurred",{"2":{"52":2,"179":3}}],["occursin",{"2":{"179":2}}],["occurs",{"2":{"52":1,"181":1}}],["ocrtask",{"0":{"153":1},"2":{"20":2,"181":4}}],["ocr",{"0":{"20":1},"2":{"20":1,"153":1,"181":4}}],["overwrite",{"2":{"181":1,"183":1}}],["overwritten",{"2":{"52":1,"179":1}}],["overrules",{"2":{"181":2}}],["overriden",{"2":{"181":1}}],["overrides",{"2":{"181":4}}],["override",{"2":{"52":2,"85":1,"86":1,"179":2}}],["overall",{"2":{"121":2,"140":1,"142":1,"181":3}}],["overarching",{"2":{"63":1}}],["overload",{"2":{"183":1}}],["overloaded",{"2":{"93":1}}],["overlaps",{"2":{"183":2}}],["ov
erlapping",{"2":{"183":1}}],["overlap",{"2":{"67":1,"183":1}}],["overhead",{"2":{"28":1}}],["over",{"2":{"19":1,"114":1,"120":1,"155":1,"156":1,"159":1,"170":1,"171":1,"172":1,"178":1,"181":12,"182":1}}],["overview",{"0":{"10":1,"63":1,"106":1},"2":{"0":1,"75":1}}],["o",{"2":{"18":2,"88":4,"181":5}}],["olama",{"2":{"181":1}}],["oldest",{"2":{"181":1}}],["old",{"2":{"12":3}}],["ollamamanagedschema",{"2":{"22":1,"42":2,"181":12}}],["ollama",{"0":{"22":1,"37":1,"84":1},"1":{"38":1,"39":1,"40":1,"41":1,"42":1,"43":1,"44":1,"45":1,"46":1,"47":1},"2":{"0":2,"22":4,"28":1,"37":4,"42":1,"69":3,"70":1,"77":1,"83":1,"84":8,"85":2,"100":1,"101":1,"181":21}}],["ollamaschema",{"2":{"0":1,"37":1,"42":5,"47":1,"85":3,"181":3}}],["origin",{"2":{"181":1}}],["originated",{"2":{"181":3}}],["originally",{"2":{"181":2}}],["original",{"2":{"58":4,"66":1,"93":1,"117":6,"118":8,"121":1,"127":1,"128":1,"130":1,"153":1,"179":1,"181":9,"183":11}}],["oriented",{"2":{"162":2}}],["orientation",{"2":{"144":1,"145":1,"147":1}}],["oro1m",{"2":{"78":2}}],["oro1",{"2":{"78":2}}],["organization",{"2":{"140":1,"181":2}}],["organize",{"2":{"130":1,"155":2,"156":1}}],["org",{"2":{"58":1,"179":2,"181":1}}],["ordering",{"2":{"66":1,"179":4}}],["ordered",{"2":{"58":1,"181":2}}],["orders",{"2":{"20":1,"181":2}}],["order",{"2":{"6":1,"58":1,"67":1,"71":1,"112":1,"181":1,"183":3}}],["or",{"0":{"85":1,"86":1},"2":{"5":3,"6":3,"7":6,"8":1,"10":8,"11":3,"13":1,"17":2,"18":4,"19":1,"20":1,"21":4,"23":3,"24":3,"26":3,"29":1,"30":1,"31":1,"34":1,"37":2,"39":1,"41":1,"42":2,"43":1,"46":1,"49":2,"51":3,"52":23,"55":1,"57":1,"58":8,"59":1,"60":1,"63":5,"64":3,"66":2,"67":20,"69":2,"71":1,"73":2,"78":4,"84":1,"85":1,"86":1,"88":3,"89":2,"93":1,"95":1,"97":1,"100":2,"101":2,"103":2,"104":2,"105":2,"106":8,"112":1,"114":1,"117":1,"118":1,"120":2,"126":3,"127":2,"130":4,"131":1,"137":1,"138":1,"140":2,"141":5,"142":6,"152":1,"155":2,"156":4,"161":3,"163":3,"167":1,"170":2,"171":1,"172":1,"174":1,"178":2,"179":24,"180":1,"181":187,"183":51}}],["our",{"2":{"3":1,"6":1,"7":2,"21":1,"37":1,"51":1,"52":3,"71":4,"74":2,"88":6,"91":1,"107":2,"108":3,"126":2,"174":1,"179":3,"181":6,"183":3}}],["outside",{"2":{"174":1}}],["outlined",{"2":{"144":1,"145":1,"147":1}}],["outline",{"2":{"130":1,"165":2,"167":2}}],["outcomes",{"2":{"52":1,"88":1,"179":2}}],["outcome",{"2":{"49":1,"140":6,"141":6,"142":6,"174":5,"179":3,"181":2}}],["outer",{"2":{"6":1,"7":4}}],["outerjoin",{"2":{"6":1}}],["output`",{"2":{"21":1,"51":1,"52":1,"179":1}}],["outputs",{"2":{"7":1,"20":1,"21":2,"24":1,"51":1,"52":3,"108":1,"181":23}}],["output",{"0":{"21":1},"2":{"6":2,"7":8,"10":3,"21":4,"31":1,"49":4,"51":9,"52":43,"58":4,"61":2,"67":1,"88":7,"89":2,"93":1,"104":3,"106":3,"107":1,"108":17,"112":1,"144":1,"145":1,"147":1,"150":1,"157":2,"161":1,"167":1,"179":32,"181":65,"183":11}}],["out",{"2":{"2":1,"10":1,"21":9,"51":5,"52":36,"59":1,"67":4,"71":1,"74":1,"91":1,"106":1,"107":1,"126":1,"128":1,"179":35,"181":11,"183":7}}],["own",{"2":{"2":1,"15":1,"52":1,"59":1,"67":3,"86":1,"179":1,"181":6,"183":4}}],["otherwise",{"2":{"52":2,"74":1,"179":4,"181":22,"183":1}}],["others",{"2":{"35":1,"108":1,"155":1}}],["other",{"0":{"23":1,"27":1},"2":{"2":1,"23":2,"27":1,"32":1,"35":1,"36":1,"49":1,"52":4,"58":1,"61":1,"67":3,"69":1,"70":1,"73":1,"79":1,"85":1,"86":1,"89":2,"93":1,"95":1,"101":1,"114":1,"138":1,"140":1,"156":2,"157":1,"169":1,"177":1,"179":4,"181":13,"183":5}}],["ops",{"2":{"183":1}}],["op",{"2":{"181":1,"183":10}}],["opposite",{"2":{"183":1}}],["opposed",{"2":{"11":1,"181":1}}],["
opportunity",{"2":{"117":1,"118":1}}],["opt",{"2":{"71":1}}],["option",{"2":{"138":2,"181":2,"183":3}}],["options",{"2":{"52":1,"63":1,"67":1,"74":1,"85":1,"95":1,"179":1,"183":2}}],["options=",{"2":{"37":2}}],["optional",{"2":{"2":2,"10":3,"52":2,"61":1,"66":1,"67":1,"106":3,"179":2,"181":44,"183":2}}],["optionally",{"2":{"2":1,"58":1,"67":1,"89":1,"108":1,"181":4,"183":2}}],["optimized",{"2":{"127":2,"179":1}}],["optimizes",{"2":{"124":1}}],["optimize",{"2":{"21":1,"179":1}}],["operate",{"2":{"21":1,"51":1,"52":1,"67":1,"179":1,"183":1}}],["operations",{"2":{"14":1,"52":2,"67":2,"181":4,"183":2}}],["operation",{"2":{"7":7,"179":1,"181":1,"183":4}}],["opens",{"2":{"183":2}}],["openrouteropenaischema",{"2":{"181":2}}],["openrouter",{"2":{"78":6,"181":6}}],["opened",{"2":{"74":1}}],["opentagger",{"2":{"67":1,"181":1,"183":4}}],["opening",{"2":{"24":1}}],["openhermes2",{"2":{"22":3,"37":1,"40":1,"47":1,"84":2,"181":10}}],["open",{"0":{"83":1},"2":{"11":1,"23":1,"26":1,"52":1,"57":1,"60":1,"61":1,"73":1,"83":1,"84":1,"108":2,"179":1,"181":1,"183":1}}],["openaistream",{"2":{"181":3}}],["openaischema",{"2":{"0":2,"93":5,"101":1,"102":3,"107":3,"181":43}}],["openaiapi",{"0":{"73":1}}],["openai",{"0":{"23":1,"27":1,"69":1,"70":1,"71":1,"72":1,"73":1,"74":1,"76":1,"78":1},"1":{"70":1},"2":{"0":4,"10":1,"16":1,"21":1,"23":3,"25":1,"26":1,"27":2,"42":1,"51":1,"52":1,"69":1,"71":6,"72":4,"73":3,"74":2,"75":2,"76":3,"77":2,"78":1,"79":6,"80":2,"81":1,"86":3,"92":1,"95":9,"100":2,"101":3,"102":3,"106":1,"107":6,"108":1,"114":1,"179":1,"181":83,"183":1}}],["ongoing",{"2":{"181":1}}],["online",{"2":{"72":1,"95":1,"181":4}}],["only",{"2":{"2":1,"7":8,"8":1,"10":3,"12":1,"18":1,"21":6,"23":2,"24":5,"27":2,"32":1,"42":1,"49":2,"51":5,"52":15,"54":1,"58":2,"61":1,"66":1,"67":2,"74":1,"85":1,"89":1,"92":2,"105":1,"106":4,"107":3,"108":2,"110":1,"112":1,"114":1,"117":3,"118":3,"120":1,"127":1,"130":1,"136":1,"138":1,"155":3,"156":1,"157":1,"160":1,"163":1,"164":1,"166":1,"168":1,"173":1,"174":1,"176":1,"179":23,"181":75,"183":10}}],["once",{"2":{"6":1,"7":1,"59":1,"84":1,"86":1,"91":1,"107":1,"165":1,"167":1,"181":7}}],["ones",{"2":{"23":1,"24":1,"27":1,"67":2,"70":1,"114":1,"181":1,"183":2}}],["one",{"0":{"5":1,"45":1},"2":{"5":1,"6":2,"10":1,"12":1,"13":1,"15":1,"21":1,"24":1,"25":1,"30":1,"31":1,"41":1,"42":1,"43":1,"45":1,"51":1,"52":4,"54":1,"61":1,"63":1,"65":1,"66":1,"67":1,"74":1,"77":2,"79":1,"88":2,"89":1,"105":1,"106":1,"131":1,"136":2,"138":2,"157":2,"163":1,"170":1,"178":1,"179":5,"181":35,"183":10}}],["on",{"2":{"0":1,"7":4,"10":1,"11":1,"13":3,"17":2,"19":1,"21":3,"22":1,"23":2,"24":2,"26":1,"30":1,"37":1,"42":1,"47":1,"51":3,"52":12,"58":4,"60":1,"61":7,"63":1,"66":5,"67":8,"71":1,"72":1,"73":2,"74":1,"75":2,"76":1,"78":1,"79":2,"89":3,"95":1,"105":1,"106":1,"108":6,"110":2,"112":4,"114":1,"117":2,"118":2,"120":1,"121":3,"122":3,"126":1,"130":3,"131":2,"132":1,"138":1,"140":3,"141":3,"142":3,"152":1,"156":2,"161":2,"163":2,"167":1,"171":1,"172":1,"174":2,"179":23,"181":66,"183":18}}],["office",{"2":{"163":1}}],["offloaded",{"2":{"37":1}}],["offload",{"2":{"28":1}}],["off",{"2":{"15":1,"79":1,"181":1}}],["offering",{"2":{"71":1}}],["offers",{"2":{"21":1}}],["offer",{"2":{"13":1,"15":1,"140":1,"142":1}}],["often",{"2":{"5":1,"6":1,"7":2,"11":1,"29":1,"67":2,"181":21,"183":5}}],["of",{"0":{"0":1,"51":1},"2":{"0":5,"2":5,"3":3,"4":1,"5":4,"6":5,"7":28,"8":1,"10":7,"11":2,"12":3,"13":4,"14":1,"15":2,"16":1,"18":1,"19":4,"20":4,"21":5,"23":4,"24":15,"26":4,"28":1,"29":1,"30":1,"31":3,"32":1,"35":1,"36
":2,"37":1,"41":1,"42":1,"45":2,"46":1,"47":1,"48":1,"49":8,"51":5,"52":34,"55":4,"56":2,"57":4,"58":29,"59":3,"60":7,"61":13,"63":7,"66":4,"67":59,"69":2,"71":2,"72":1,"73":2,"74":6,"75":3,"76":2,"77":4,"78":4,"83":1,"86":2,"88":5,"89":2,"91":5,"92":5,"93":2,"96":2,"97":7,"98":6,"99":2,"100":3,"101":2,"102":3,"103":1,"104":2,"105":4,"106":7,"107":8,"108":15,"114":1,"120":4,"121":3,"126":1,"127":1,"130":10,"131":2,"136":2,"138":4,"140":3,"141":4,"142":3,"144":1,"152":3,"155":8,"156":6,"157":2,"160":1,"161":2,"163":2,"164":1,"166":2,"168":2,"169":1,"170":7,"171":2,"172":1,"173":2,"174":4,"176":2,"177":1,"178":7,"179":75,"180":4,"181":429,"183":249}}],["sse",{"2":{"181":2}}],["ss",{"2":{"155":2,"156":3}}],["swap",{"2":{"67":1,"183":2}}],["swiftly",{"2":{"58":1,"181":1}}],["switching",{"2":{"52":1,"179":1}}],["switch",{"2":{"11":2,"51":1}}],["sqrt",{"2":{"179":1}}],["square",{"2":{"61":1,"67":2,"183":2}}],["sqlcoder",{"2":{"84":1}}],["sqlservercentral",{"2":{"20":1,"181":2}}],["sql",{"2":{"20":3,"181":6}}],["sk",{"2":{"181":1}}],["skilled",{"2":{"163":1}}],["skips",{"2":{"89":1,"105":1,"181":13,"183":2}}],["skipped",{"2":{"52":1,"181":1}}],["skip",{"2":{"52":9,"67":5,"174":1,"179":2,"181":7,"183":8}}],["sky",{"2":{"58":1,"181":3}}],["src",{"2":{"58":1,"66":1,"86":1,"181":7,"183":1}}],["svilupp",{"2":{"58":1,"181":6,"183":1}}],["snippet",{"2":{"82":1,"124":1}}],["snippets",{"2":{"52":1,"171":1,"172":1,"181":1,"183":7}}],["snowball",{"2":{"59":1,"183":1}}],["snow",{"2":{"58":1,"181":1}}],["slice",{"2":{"183":4}}],["slicing",{"2":{"171":1,"172":1}}],["sliding",{"2":{"181":1,"183":3}}],["slightly",{"2":{"58":1,"181":3}}],["slots",{"2":{"167":1}}],["slot",{"2":{"108":1}}],["slowly",{"2":{"98":1}}],["slow",{"2":{"98":1,"181":1}}],["sleep",{"2":{"74":1,"181":1}}],["slack",{"2":{"57":1,"60":1}}],["smart",{"2":{"181":1}}],["smarter",{"2":{"181":1}}],["smallint",{"2":{"88":5}}],["small",{"2":{"7":1,"11":1,"23":1,"26":1,"66":1,"155":1,"169":1,"172":1,"177":1,"179":2,"181":4}}],["smaller",{"2":{"2":1,"58":6,"108":1,"179":1,"181":7}}],["smoke",{"2":{"58":1,"181":1}}],["smith",{"2":{"114":2}}],["smiling",{"2":{"42":1}}],["smiles",{"2":{"40":1,"41":1}}],["smirks",{"2":{"41":1}}],["shift",{"2":{"183":1}}],["shiny",{"2":{"181":1}}],["shimmering",{"2":{"58":2,"181":2}}],["shell",{"2":{"173":1}}],["shapley",{"2":{"174":1}}],["shap",{"2":{"174":10}}],["sharegptschema",{"2":{"181":3}}],["sharegpt",{"2":{"91":1}}],["share",{"2":{"71":1,"72":1,"95":1}}],["shared",{"2":{"58":1,"65":2,"67":2,"92":1,"170":1,"178":1,"181":4,"183":2}}],["sharing",{"2":{"24":1}}],["shallow",{"2":{"52":1,"179":1}}],["shall",{"2":{"12":1}}],["shot",{"2":{"179":1}}],["shortcut",{"2":{"183":1}}],["shortcuts",{"2":{"130":1,"131":1,"132":1}}],["short",{"2":{"52":1,"58":1,"61":1,"114":1,"120":1,"122":1,"155":2,"161":1,"163":1,"179":1,"181":7}}],["shorter",{"2":{"29":1}}],["should",{"2":{"2":1,"7":1,"12":1,"22":1,"35":1,"41":1,"52":1,"58":1,"63":1,"67":2,"79":1,"80":1,"84":2,"88":2,"93":1,"95":1,"104":1,"108":2,"112":3,"120":1,"138":2,"152":2,"155":4,"156":4,"157":7,"163":2,"167":1,"174":2,"179":6,"181":37,"183":2}}],["showcase",{"2":{"150":1}}],["shows",{"2":{"20":1,"24":1,"63":1,"91":1,"181":4}}],["show",{"2":{"2":1,"7":1,"52":2,"84":1,"88":1,"92":1,"179":3,"181":1}}],["side",{"2":{"107":1}}],["sister",{"2":{"91":1}}],["sink",{"2":{"181":1}}],["since",{"2":{"89":1,"107":1,"155":1}}],["singletons",{"2":{"181":5}}],["single",{"2":{"58":2,"91":1,"181":11,"183":4}}],["situations",{"2":{"69":1}}],["silent",{"2":{"58":2,"181":2}}],["sibblings",{"2":{
"52":2,"179":2}}],["size=8",{"2":{"183":1}}],["size`",{"2":{"181":1}}],["size",{"2":{"45":2,"46":2,"47":1,"67":2,"181":5,"183":15}}],["sizes",{"2":{"8":1,"67":2,"183":11}}],["sig",{"2":{"108":3}}],["significant",{"2":{"120":1,"156":2}}],["signing",{"2":{"72":1}}],["sign",{"2":{"54":1}}],["signatures",{"2":{"52":1,"179":1,"181":2}}],["signature",{"2":{"21":1,"51":1,"63":1,"64":4,"108":7,"181":21}}],["sigh",{"2":{"41":1}}],["simultaneously",{"2":{"21":1,"51":1,"52":1,"179":1}}],["similarly",{"2":{"98":1,"181":1}}],["similarity",{"2":{"2":2,"8":1,"47":2,"57":1,"58":1,"66":2,"67":5,"181":1,"183":33}}],["similar",{"2":{"2":1,"10":2,"52":1,"58":1,"67":3,"103":1,"106":2,"179":2,"181":6,"183":6}}],["simplistic",{"2":{"181":1}}],["simplification",{"2":{"107":1}}],["simply",{"2":{"2":1,"10":1,"20":1,"24":1,"28":1,"29":1,"36":1,"52":1,"57":1,"67":1,"88":2,"89":2,"95":1,"97":2,"106":1,"117":1,"118":1,"149":1,"179":1,"181":6,"183":10}}],["simplebm25retriever",{"2":{"181":1,"183":4}}],["simpleanswerer",{"2":{"67":2,"181":1,"183":8}}],["simplegenerator",{"2":{"67":2,"181":1,"183":5}}],["simplerefiner",{"2":{"67":1,"181":1,"183":6}}],["simpleretriever",{"2":{"67":5,"181":1,"183":11}}],["simplerephraser",{"2":{"65":1,"181":1,"183":4}}],["simplest",{"2":{"63":1,"67":1,"87":1,"181":4,"183":1}}],["simpleindexer",{"2":{"61":2,"67":3,"181":1,"183":9}}],["simple",{"0":{"1":1,"34":1,"39":1,"45":1},"1":{"2":1},"2":{"7":1,"8":1,"10":1,"16":1,"22":1,"43":1,"47":1,"49":1,"52":1,"61":1,"74":2,"77":2,"84":1,"89":2,"106":1,"122":1,"134":1,"152":1,"179":1,"181":15,"183":10}}],["scene",{"2":{"181":1}}],["scenarios",{"2":{"52":1,"179":2,"183":1}}],["science",{"2":{"167":1,"174":1}}],["scientific",{"2":{"61":1}}],["scientist",{"2":{"24":2}}],["scans",{"2":{"181":3}}],["scan",{"2":{"155":1,"156":1,"181":4}}],["scanned",{"2":{"153":1}}],["scaled",{"2":{"183":1}}],["scale",{"2":{"61":1,"121":2,"122":2,"181":1,"183":6}}],["scoring=thompsonsampling",{"2":{"52":1,"179":1}}],["scoring",{"2":{"52":1,"121":1,"179":10,"183":1}}],["score==nothing",{"2":{"183":1}}],["scores2",{"2":{"183":3}}],["scores1",{"2":{"183":3}}],["scores=false",{"2":{"183":1}}],["scores",{"2":{"52":1,"61":1,"67":5,"121":1,"179":7,"183":28}}],["scored",{"2":{"21":1,"51":1,"52":1,"179":1}}],["scoreparametersstringstringstringsubstrin",{"2":{"7":1}}],["scoreretrieval",{"2":{"7":1}}],["score",{"2":{"6":3,"7":10,"10":2,"52":20,"61":1,"67":12,"121":2,"179":32,"181":5,"183":35}}],["scope",{"2":{"52":1,"179":1}}],["script",{"2":{"181":2}}],["scripting",{"2":{"173":2}}],["scratch",{"2":{"52":1,"181":2}}],["scratches",{"2":{"41":1}}],["scrollable",{"2":{"24":1}}],["screenshot",{"2":{"20":3,"153":1,"181":4}}],["schema=json3",{"2":{"108":2}}],["schema=openaischema",{"2":{"93":1,"181":1}}],["schema=myschema",{"2":{"86":1,"179":1}}],["schema=pt",{"2":{"85":1}}],["schema",{"0":{"42":1,"85":1},"2":{"0":5,"10":2,"22":5,"23":2,"26":1,"27":1,"29":1,"30":1,"31":1,"37":1,"39":1,"42":7,"45":2,"46":2,"47":2,"52":4,"85":5,"86":2,"92":2,"93":7,"101":1,"102":1,"106":2,"107":7,"108":5,"179":5,"181":205}}],["schemas",{"0":{"102":1},"2":{"0":1,"86":1,"93":1,"100":1,"102":2,"181":4}}],["satisfactory",{"2":{"141":2}}],["satisfy",{"2":{"130":1,"142":1}}],["saving",{"2":{"67":1,"181":2,"183":2}}],["saverschema",{"2":{"93":7,"181":16}}],["saves",{"2":{"52":1,"179":2,"181":4,"183":1}}],["saved",{"2":{"11":1,"24":2,"52":2,"67":1,"89":1,"93":1,"95":1,"105":1,"179":2,"181":7,"183":4}}],["save",{"2":{"2":2,"4":1,"7":1,"13":1,"24":4,"32":1,"52":2,"66":2,"72":1,"77":2,"89":3,"91":3,"93":2,"95":1,"108":
1,"179":1,"181":29}}],["safety",{"2":{"58":1,"181":1}}],["safely",{"2":{"52":1,"181":1}}],["safe",{"2":{"52":4,"181":6}}],["sampling",{"2":{"179":5,"181":2}}],["samplenode",{"2":{"52":25,"179":45,"181":1}}],["sample",{"2":{"52":23,"179":58,"181":11,"183":1}}],["samples=1",{"2":{"52":2,"179":2}}],["samples=2`",{"2":{"21":1,"51":1,"52":1,"179":1}}],["samples",{"2":{"21":3,"49":3,"51":3,"52":33,"179":55,"181":4}}],["same",{"2":{"6":1,"10":2,"11":1,"21":2,"34":1,"49":3,"51":1,"52":2,"58":2,"67":3,"74":1,"79":1,"88":2,"89":1,"95":1,"104":1,"106":2,"107":1,"130":1,"131":1,"179":9,"181":18,"183":18}}],["salty",{"2":{"89":2,"181":2}}],["salt",{"2":{"19":2}}],["san",{"2":{"19":1}}],["says",{"2":{"75":1,"140":2,"141":2,"142":2}}],["say",{"2":{"12":1,"21":2,"22":1,"23":3,"26":2,"27":1,"28":1,"29":3,"30":1,"31":1,"34":2,"37":1,"39":1,"40":1,"42":2,"51":2,"52":2,"87":1,"89":3,"92":6,"93":3,"98":1,"107":1,"110":1,"112":1,"117":1,"118":1,"179":11,"181":22}}],["said",{"2":{"10":1,"49":1,"106":1,"181":2}}],["sorted",{"2":{"183":1}}],["sort",{"2":{"181":1}}],["sorry",{"2":{"181":3}}],["sonnet",{"2":{"181":1}}],["soft",{"2":{"76":2}}],["solve",{"2":{"169":1,"171":1,"172":1,"177":2}}],["solving",{"2":{"142":1,"169":2,"171":2,"172":2,"177":1}}],["solutions",{"2":{"95":1}}],["solution",{"2":{"73":1,"130":2,"169":1,"171":1,"172":1,"177":1,"181":2}}],["solid",{"2":{"58":1,"181":6,"183":1}}],["source=",{"2":{"183":1}}],["source2",{"2":{"67":1,"183":1}}],["source1",{"2":{"67":1,"183":1}}],["sourced",{"2":{"10":1}}],["source",{"0":{"83":1},"2":{"5":1,"6":1,"10":1,"13":1,"23":1,"24":1,"26":1,"52":14,"55":1,"58":7,"61":1,"67":17,"83":1,"89":1,"108":2,"110":1,"112":1,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"130":1,"131":1,"132":1,"134":1,"136":1,"137":1,"138":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":2,"149":1,"150":1,"152":1,"153":1,"155":1,"156":1,"157":1,"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":2,"171":1,"172":1,"173":1,"174":1,"176":1,"177":1,"178":2,"179":38,"180":2,"181":193,"182":1,"183":159}}],["sources=false",{"2":{"183":1}}],["sources=map",{"2":{"61":1}}],["sources",{"2":{"4":2,"6":1,"61":3,"67":17,"183":50}}],["so",{"2":{"4":1,"7":1,"15":2,"18":1,"21":4,"23":2,"24":1,"26":2,"30":1,"31":2,"36":1,"42":1,"51":1,"52":1,"58":1,"63":1,"67":3,"73":1,"74":2,"75":1,"78":1,"80":1,"89":2,"103":1,"105":1,"107":3,"108":5,"114":1,"130":1,"131":1,"155":1,"179":3,"181":22,"182":1,"183":4}}],["sometimes",{"2":{"108":1,"118":1}}],["something",{"2":{"30":1,"31":1,"41":1,"42":1,"107":1,"163":2}}],["somewhere",{"2":{"105":1}}],["some",{"2":{"2":2,"7":1,"8":1,"10":1,"20":2,"21":2,"22":1,"24":2,"27":1,"28":1,"37":1,"42":2,"51":1,"52":1,"57":2,"58":1,"61":4,"63":3,"66":1,"67":2,"78":3,"84":1,"86":1,"88":1,"95":1,"103":1,"105":2,"106":1,"108":3,"117":2,"152":1,"156":1,"165":1,"167":1,"169":1,"177":1,"179":1,"181":29,"183":12}}],["synthetic",{"2":{"183":2}}],["syntactically",{"2":{"142":1}}],["syntax",{"2":{"2":1,"13":1,"20":1,"21":1,"24":4,"51":1,"52":1,"74":1,"88":1,"89":1,"91":1,"108":1,"142":3,"163":1,"168":1,"171":3,"172":3,"176":1,"179":2,"181":8}}],["sync",{"2":{"181":1}}],["synced",{"2":{"72":1,"95":1}}],["synonyms",{"2":{"126":2}}],["symphony",{"2":{"58":1,"181":1}}],["symbols",{"2":{"181":3,"183":3}}],["symbolic",{"2":{"92":1}}],["symbol=",{"2":{"67":1,"179":1,"183":1}}],["symbol",{"2":{"6":1,"13":2,"24":3,"61":1,"67":3,"89":2,"105":1,"179":6,"181":54,"183":32}}],["system+user",{"2":{"179":1}}],["systematic",{"2":{"169"
:1,"171":1,"172":1,"177":1}}],["system=",{"2":{"89":1,"93":1,"105":1,"181":3}}],["systemmessage",{"2":{"12":3,"24":3,"35":1,"41":1,"78":1,"87":1,"89":2,"92":3,"104":1,"105":2,"107":1,"181":13}}],["systems",{"2":{"10":1,"58":2,"106":1,"120":1,"181":5}}],["system",{"0":{"1":1,"63":1},"1":{"2":1},"2":{"3":1,"12":1,"13":2,"17":2,"24":2,"36":2,"52":1,"61":2,"63":2,"78":3,"79":1,"89":3,"92":3,"102":1,"104":1,"105":3,"107":1,"110":1,"112":1,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"130":1,"131":1,"132":1,"134":1,"136":1,"137":1,"138":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"149":5,"150":2,"152":1,"153":1,"155":1,"156":1,"157":1,"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"171":1,"172":1,"173":1,"174":1,"176":1,"177":1,"178":1,"179":2,"181":105}}],["stemmer",{"2":{"183":2}}],["stemmer=nothing",{"2":{"183":1}}],["stemming",{"2":{"183":1}}],["steer",{"2":{"181":3}}],["steam",{"2":{"181":3}}],["steroids",{"2":{"108":1}}],["step=4",{"2":{"183":1}}],["steps`",{"2":{"156":1}}],["steps",{"2":{"24":1,"61":3,"63":1,"66":1,"67":4,"106":1,"107":1,"130":4,"155":1,"156":14,"163":1,"169":1,"171":1,"172":1,"181":2,"183":9}}],["step",{"2":{"2":1,"8":1,"21":2,"51":2,"52":3,"59":1,"61":4,"63":7,"65":3,"66":2,"67":10,"88":1,"89":1,"92":2,"105":1,"107":4,"108":1,"117":1,"118":1,"124":1,"125":1,"127":1,"128":1,"130":6,"131":4,"132":2,"140":2,"141":2,"142":2,"144":2,"165":2,"167":2,"169":2,"177":2,"179":3,"181":2,"183":21}}],["stipple",{"2":{"61":1}}],["still",{"2":{"1":1,"61":1,"73":1,"181":2}}],["stylistic",{"2":{"140":2}}],["styling",{"2":{"61":1,"183":5}}],["styled",{"2":{"183":1}}],["styles=",{"2":{"183":3}}],["styles",{"2":{"181":2,"183":10}}],["styler=rt",{"2":{"183":4}}],["styler",{"2":{"181":1,"183":21}}],["style=",{"2":{"58":1,"181":6,"183":1}}],["style",{"2":{"2":1,"5":2,"6":1,"7":11,"58":1,"67":1,"92":1,"98":1,"140":2,"144":1,"145":1,"147":1,"150":1,"170":1,"174":1,"178":1,"181":12,"183":12}}],["stop",{"2":{"76":1,"181":2,"183":1}}],["stopwords",{"2":{"57":2,"183":4}}],["stood",{"2":{"58":1,"181":1}}],["storage",{"2":{"181":1,"183":2}}],["storing",{"2":{"67":1,"183":8}}],["storyteller",{"2":{"174":1}}],["storytellerexplainshap",{"0":{"174":1}}],["storytelling",{"2":{"174":1}}],["story",{"2":{"58":4,"174":6,"181":4}}],["store",{"2":{"87":1,"89":3,"181":17}}],["stored",{"2":{"52":2,"67":1,"179":2,"183":1}}],["stores",{"2":{"10":1,"49":1,"52":2,"106":1,"179":2,"181":3,"183":3}}],["stdout",{"2":{"52":12,"179":1,"181":23}}],["stub",{"2":{"181":1}}],["stumbled",{"2":{"35":1,"181":1}}],["study",{"2":{"155":1}}],["studying",{"2":{"5":1,"7":1,"156":1}}],["studies",{"2":{"61":1}}],["studied",{"2":{"5":1,"7":1}}],["studio",{"0":{"32":1},"1":{"33":1,"34":1,"35":1,"36":1},"2":{"32":2,"36":1,"181":2}}],["strength",{"2":{"183":2}}],["stream",{"2":{"181":18,"183":3}}],["streaming",{"2":{"181":17}}],["streamed",{"2":{"181":5}}],["streamchunk",{"2":{"181":8}}],["streamcallback",{"2":{"181":42}}],["strong",{"2":{"181":4}}],["strongly",{"2":{"10":1}}],["strategies",{"2":{"66":1}}],["strategy",{"2":{"2":1,"66":5,"67":2,"181":2,"183":5}}],["stranger",{"2":{"58":1,"181":1}}],["strict",{"2":{"140":1,"144":1,"145":1,"147":1,"181":19}}],["strictly",{"2":{"66":1,"120":1,"156":1,"170":1,"178":1}}],["strin",{"2":{"108":1}}],["string=",{"2":{"58":2,"181":3}}],["strings",{"2":{"7":1,"57":3,"58":5,"67":3,"181":19,"183":16}}],["string",{"0":{"40":1},"2":{"7":4,"10":1,"13":5,"19":1,"21":1,"24":8,"31":2,"34":1,"42":1,"51":1,"52":17,"57
":6,"58":30,"61":6,"67":4,"88":4,"89":5,"92":1,"93":1,"97":2,"106":1,"107":2,"108":15,"162":1,"170":1,"171":1,"172":1,"178":1,"179":16,"180":1,"181":275,"183":34}}],["stripping",{"2":{"128":1}}],["strip",{"2":{"58":1,"126":1,"128":1,"181":1}}],["struggle",{"2":{"52":1,"179":1}}],["structural",{"2":{"140":1}}],["structures",{"2":{"150":1,"171":1,"172":1,"179":1}}],["structured",{"2":{"10":1,"19":1,"31":1,"63":1,"106":1,"108":1,"144":1,"145":1,"147":1,"181":8}}],["structure",{"2":{"10":1,"19":1,"52":4,"58":3,"106":1,"153":1,"155":2,"156":3,"157":1,"179":3,"181":9,"183":2}}],["structtypes",{"2":{"108":1}}],["structs",{"2":{"67":1,"104":1,"108":1,"181":2,"183":1}}],["struct",{"2":{"10":2,"19":4,"31":1,"49":2,"52":4,"61":1,"63":3,"67":1,"86":1,"88":5,"106":2,"108":7,"114":1,"179":4,"181":48,"183":8}}],["str",{"0":{"97":1},"2":{"15":1,"58":1,"97":2,"98":4,"181":29,"183":1}}],["stands",{"2":{"181":1}}],["standards",{"2":{"140":1}}],["standard",{"0":{"40":1},"2":{"52":2,"67":1,"171":1,"172":1,"181":15,"183":7}}],["stays",{"2":{"74":1,"120":1,"179":1}}],["stage",{"2":{"67":1,"92":1,"183":13}}],["stabilizes",{"2":{"59":1,"179":1}}],["stark",{"2":{"181":1}}],["stars",{"2":{"58":3,"181":3}}],["star",{"2":{"12":1,"35":1,"41":1,"181":5}}],["start=1",{"2":{"183":1}}],["start|>assistant",{"2":{"181":1}}],["start|>user",{"2":{"181":1}}],["start>system",{"2":{"181":1}}],["startup",{"2":{"82":2,"181":1}}],["starter",{"2":{"61":1}}],["started",{"0":{"94":1},"1":{"95":1,"96":1,"97":1,"98":1},"2":{"59":1,"69":1}}],["starting",{"2":{"52":1,"136":1,"138":1,"179":2,"181":3}}],["starts",{"2":{"52":1,"179":2,"181":2}}],["startswith",{"2":{"52":1,"179":1}}],["start",{"0":{"97":1},"2":{"1":1,"19":1,"24":2,"28":2,"76":1,"79":1,"95":2,"97":1,"99":1,"163":1,"179":1,"181":2,"183":8}}],["status",{"2":{"179":2,"181":23}}],["statistical",{"2":{"61":1}}],["statistics",{"2":{"1":1}}],["stats",{"2":{"52":23,"179":33}}],["stated",{"2":{"142":1}}],["stateless",{"2":{"103":1}}],["state",{"2":{"19":1,"52":2,"69":1,"108":1,"179":2,"181":1}}],["statements",{"2":{"88":1,"170":1,"178":1,"181":1}}],["statement",{"2":{"17":2,"77":1,"103":1,"104":1,"137":4,"138":1,"181":6}}],["states",{"2":{"6":1}}],["splatting",{"2":{"183":1}}],["spliter",{"2":{"90":1}}],["splits",{"2":{"58":1,"67":1,"181":1,"183":7}}],["splitting",{"2":{"58":11,"67":2,"181":12,"183":3}}],["splitters",{"2":{"58":1,"181":1}}],["splitter",{"2":{"57":2,"58":10,"90":2,"181":13,"183":1}}],["split",{"2":{"8":1,"21":2,"51":2,"52":1,"57":3,"58":14,"67":1,"90":3,"155":1,"163":1,"179":4,"181":16,"183":4}}],["spillover",{"2":{"181":3}}],["spiders",{"2":{"181":1}}],["spider",{"2":{"18":1,"88":1,"181":1}}],["speaking",{"2":{"93":1,"181":3}}],["speak",{"2":{"89":3,"174":1,"181":4}}],["spend",{"2":{"76":1}}],["spending",{"0":{"76":1},"2":{"72":1,"76":1,"97":1}}],["speeds",{"2":{"183":3}}],["speed",{"2":{"46":1}}],["spec",{"2":{"181":2}}],["specs",{"2":{"181":1}}],["specialist",{"2":{"136":1,"138":1}}],["specializes",{"2":{"157":1}}],["specialized",{"2":{"52":1,"124":1,"140":1,"141":1,"179":2,"181":1}}],["specializing",{"2":{"125":1,"127":1}}],["special",{"2":{"114":4,"115":2,"120":3,"155":2,"156":2,"159":2,"170":2,"171":2,"172":2,"174":2,"178":5,"183":2}}],["specifying",{"2":{"67":1,"181":1,"183":1}}],["specify",{"2":{"15":1,"34":1,"58":1,"65":1,"67":2,"85":2,"108":1,"130":1,"181":19,"183":4}}],["specified",{"2":{"58":2,"66":1,"67":1,"93":1,"140":1,"157":2,"179":3,"181":12,"183":6}}],["specifies",{"2":{"11":1,"52":1,"179":1,"181":1}}],["specification",{"2":{"102":1,"108":1,"181":1}}],["
specifications",{"2":{"58":1,"108":1,"181":1}}],["specifically",{"2":{"0":1,"114":1,"181":2}}],["specific",{"2":{"0":2,"6":1,"10":1,"13":2,"23":2,"24":1,"26":2,"30":1,"31":1,"42":1,"57":1,"58":2,"60":1,"91":1,"105":1,"106":1,"108":2,"120":1,"124":2,"126":3,"130":2,"131":1,"140":1,"141":2,"142":3,"144":1,"145":1,"147":1,"156":2,"171":1,"172":1,"174":1,"181":13,"183":1}}],["spectacles",{"2":{"41":1}}],["span",{"2":{"183":1}}],["spanish",{"2":{"14":1}}],["spain",{"2":{"97":2,"98":2}}],["sparrow",{"2":{"89":4,"181":5}}],["sparse",{"2":{"66":1,"183":5}}],["sparsearrays",{"2":{"1":2,"59":1,"183":4}}],["spawn",{"2":{"46":1,"181":1}}],["spaces",{"2":{"183":3}}],["space",{"2":{"23":1,"26":1,"58":1,"181":3}}],["s",{"2":{"1":1,"2":2,"3":1,"4":1,"5":2,"6":2,"7":2,"10":2,"11":2,"13":1,"15":1,"18":1,"19":4,"20":1,"21":1,"22":3,"24":6,"32":2,"36":1,"37":1,"39":1,"40":1,"42":1,"43":1,"49":1,"51":1,"52":12,"58":7,"59":1,"61":5,"63":2,"65":1,"67":8,"69":1,"71":3,"74":1,"77":1,"78":2,"79":2,"84":3,"86":1,"87":5,"88":4,"89":3,"90":1,"92":1,"100":1,"102":2,"105":1,"106":2,"107":3,"108":14,"117":1,"118":1,"121":3,"122":2,"130":2,"131":1,"138":1,"140":8,"141":5,"142":11,"144":2,"145":2,"147":3,"150":1,"155":2,"156":1,"163":2,"165":2,"167":2,"170":3,"174":2,"178":1,"179":19,"181":96,"183":24}}],["seq",{"2":{"181":5}}],["sequentially",{"2":{"46":1,"52":2,"179":2,"181":1}}],["sequences",{"2":{"58":1,"181":1,"183":1}}],["sequence",{"2":{"21":2,"58":2,"103":1,"181":6}}],["sedan",{"2":{"181":1}}],["september",{"2":{"78":1}}],["separator=",{"2":{"58":1,"181":1}}],["separators=",{"2":{"58":4,"63":1,"67":1,"181":4,"183":1}}],["separators",{"2":{"58":19,"67":1,"90":4,"181":20,"183":7}}],["separator",{"2":{"58":9,"179":1,"181":9}}],["separated",{"2":{"179":1,"181":1,"183":1}}],["separate",{"2":{"2":1,"17":1,"59":1,"163":1,"167":1,"179":2,"181":3,"182":1,"183":2}}],["segment",{"2":{"67":1,"183":1}}],["segments",{"2":{"58":2,"181":4}}],["selects",{"2":{"179":1}}],["selecting",{"2":{"181":1}}],["selection",{"2":{"179":2}}],["selectively",{"2":{"52":1,"179":1}}],["select",{"2":{"61":1,"97":1,"114":1,"136":1,"138":3,"161":1,"179":7,"181":1}}],["selected",{"2":{"13":1,"28":1,"67":1,"92":1,"161":5,"181":3,"183":1}}],["self",{"2":{"48":1,"88":1,"106":1,"130":1,"179":1,"181":1}}],["sessions",{"2":{"80":1,"179":1,"181":2}}],["session",{"2":{"42":1,"52":1,"82":1,"179":7,"181":1}}],["sense",{"2":{"108":1,"181":1}}],["sensitive",{"2":{"22":1,"57":1,"84":1,"181":1}}],["sender",{"2":{"181":4}}],["sends",{"2":{"67":1,"180":1,"183":1}}],["send",{"2":{"52":1,"74":1,"87":1,"88":1,"102":1,"103":1,"107":1,"179":1,"181":5}}],["sending",{"2":{"23":1,"27":1,"57":1,"87":1,"89":2,"181":14}}],["senior",{"2":{"24":2}}],["sentences",{"2":{"57":3,"58":3,"61":3,"67":3,"90":1,"163":1,"174":1,"181":4,"183":11}}],["sentence",{"2":{"31":1,"57":1,"58":6,"61":1,"67":4,"77":1,"90":2,"108":3,"181":6,"183":6}}],["sentiment",{"2":{"17":1}}],["sent",{"0":{"92":1},"2":{"0":1,"22":1,"75":1,"84":1,"92":3,"102":1,"108":1,"181":7,"183":3}}],["several",{"2":{"22":1,"37":1,"86":1,"108":1,"130":1,"157":1,"163":2,"181":5}}],["seven",{"2":{"7":1}}],["secret",{"2":{"72":1,"95":1,"174":1}}],["secrets",{"2":{"58":4,"181":4}}],["sections",{"2":{"156":2,"163":4,"167":1,"181":2}}],["section",{"2":{"17":1,"22":1,"37":1,"59":1,"69":1,"72":1,"95":1,"98":1,"99":1,"155":2,"156":2,"163":1,"167":1}}],["seconds",{"2":{"10":1,"11":1,"20":2,"22":1,"23":1,"26":1,"30":1,"31":1,"52":3,"74":1,"87":1,"97":2,"98":1,"106":1,"179":2,"181":18}}],["second",{"2":{"7":4,"58":3,"67":1,"87":1,"107":3,"17
9":1,"181":6,"183":4}}],["seas",{"2":{"89":2,"181":2}}],["seats",{"2":{"88":2}}],["sea",{"2":{"58":1,"181":1}}],["searches",{"2":{"181":3}}],["searching",{"2":{"181":2,"183":1}}],["search",{"2":{"11":1,"13":1,"16":1,"21":2,"24":3,"49":1,"52":2,"54":1,"55":11,"61":4,"66":2,"67":4,"89":1,"112":2,"114":2,"115":2,"117":1,"118":12,"125":2,"126":3,"127":5,"179":4,"180":12,"181":8,"182":1,"183":32}}],["seamless",{"0":{"11":1},"2":{"0":1}}],["semantic",{"2":{"8":1,"16":1,"127":1,"183":3}}],["semi",{"2":{"6":1,"7":4}}],["semijoin",{"2":{"6":1}}],["setter",{"2":{"183":1}}],["settings",{"2":{"79":1,"179":1}}],["setting",{"0":{"76":1,"80":1},"2":{"52":1,"65":1,"90":1,"179":1,"181":1}}],["setpropertynested",{"2":{"67":2,"181":1,"183":7}}],["setup",{"0":{"84":1},"2":{"22":1,"69":1,"79":1,"83":1,"95":1,"170":1,"178":1,"179":1}}],["sets",{"2":{"4":1,"5":4,"6":2,"7":5,"67":1,"108":1,"120":1,"179":1,"181":3,"183":5}}],["set",{"0":{"7":1,"73":2},"2":{"3":2,"4":1,"7":5,"8":1,"10":2,"12":1,"15":1,"18":1,"21":2,"23":1,"24":1,"27":1,"29":2,"30":2,"31":2,"42":2,"48":1,"51":2,"52":20,"54":1,"56":1,"57":1,"58":1,"59":1,"60":2,"61":1,"63":2,"67":5,"72":1,"73":2,"74":1,"76":2,"78":1,"79":5,"80":2,"85":1,"87":1,"88":1,"95":5,"106":1,"108":2,"114":1,"127":1,"155":1,"156":1,"157":1,"170":1,"172":1,"174":1,"178":1,"179":13,"181":56,"183":18}}],["seem",{"2":{"181":1}}],["seems",{"2":{"181":2}}],["seel",{"2":{"52":1,"179":1}}],["seek",{"2":{"12":1,"35":1,"41":1}}],["see",{"0":{"92":1},"2":{"2":3,"7":1,"8":3,"10":4,"13":3,"17":1,"19":1,"21":1,"22":1,"23":3,"24":4,"26":2,"32":1,"37":2,"49":2,"52":11,"55":1,"59":1,"61":4,"66":1,"67":9,"69":1,"70":1,"74":2,"75":1,"79":2,"80":2,"81":1,"83":1,"84":2,"85":1,"87":1,"88":1,"90":1,"92":4,"93":1,"95":2,"98":1,"99":1,"102":1,"105":2,"106":4,"108":8,"112":1,"179":17,"180":1,"181":86,"183":40}}],["serializable",{"2":{"181":4}}],["serialization",{"2":{"1":1,"89":1,"105":1}}],["serialized",{"2":{"93":1}}],["serializes",{"2":{"93":1}}],["series",{"2":{"58":1,"78":1,"181":2}}],["serves",{"2":{"61":1}}],["serve",{"2":{"35":1,"84":1}}],["serverpreference",{"2":{"181":1}}],["server`",{"2":{"181":1}}],["servers",{"2":{"181":1}}],["server",{"0":{"28":1},"2":{"0":2,"25":1,"28":4,"37":1,"181":16}}],["services",{"2":{"71":1}}],["service",{"2":{"23":1,"26":1,"71":1,"84":1}}],["sun",{"2":{"183":3}}],["sunny",{"2":{"181":10}}],["sunnweiwei",{"2":{"112":1,"183":1}}],["sum",{"2":{"181":3}}],["summarizing",{"2":{"155":1,"156":1,"157":1}}],["summarize",{"2":{"130":1,"155":2,"156":1}}],["summary",{"2":{"58":1,"152":1,"155":1,"156":5,"171":1,"172":1,"174":1,"181":1}}],["suitability",{"2":{"142":1}}],["suitable",{"2":{"22":1,"58":1,"138":1,"144":1,"145":1,"147":1,"161":1,"163":2,"181":2}}],["suggesting",{"2":{"181":1}}],["suggestion",{"2":{"141":1,"142":1}}],["suggestions",{"2":{"24":1,"140":5,"141":5,"142":5}}],["suggests",{"2":{"150":1}}],["suggested",{"2":{"130":1}}],["suggest",{"2":{"130":1,"131":1}}],["suffixed",{"2":{"181":1}}],["suffix",{"2":{"52":3,"181":5}}],["suffering",{"2":{"35":1,"181":2}}],["super",{"2":{"155":1,"156":1}}],["supertype",{"2":{"101":1}}],["superseded",{"2":{"42":1}}],["supplied",{"2":{"10":1,"49":1,"52":1,"106":1,"179":1}}],["suppose",{"2":{"5":1,"7":1}}],["supports",{"2":{"0":1,"181":2,"183":2}}],["support",{"2":{"0":1,"23":2,"27":1,"32":1,"60":1,"61":2,"67":11,"71":1,"78":1,"181":13,"183":22}}],["supported",{"2":{"0":1,"10":1,"43":1,"60":1,"67":2,"86":1,"108":2,"181":13,"183":4}}],["survey",{"2":{"157":5,"159":1}}],["surrounding",{"2":{"67":1,"183":7}}],["surface",{"2":{"13":1,"
63":1,"161":1,"181":2}}],["sure",{"2":{"4":1,"7":1,"8":1,"37":2,"52":1,"67":1,"79":2,"95":2,"108":1,"138":1,"165":1,"167":1,"179":1,"181":4,"183":5}}],["subdocumenttermmatrix",{"2":{"181":1,"183":2}}],["subchunkindex",{"2":{"181":1,"183":5}}],["subcomponents",{"2":{"67":1,"183":1}}],["subject",{"2":{"163":2}}],["subheadings",{"2":{"155":2}}],["subheading",{"2":{"155":1}}],["submitted",{"2":{"71":1,"140":1}}],["sub",{"2":{"61":1,"63":1,"67":6,"77":1,"107":1,"157":1,"181":4,"183":18}}],["subseq",{"2":{"181":5}}],["subsequence",{"2":{"57":6,"58":17,"181":25}}],["subsequent",{"2":{"52":1,"58":1,"88":1,"179":3,"181":3}}],["subset",{"2":{"170":1,"178":1,"183":5}}],["substantial",{"2":{"156":1}}],["substring",{"2":{"61":2,"181":5,"183":2}}],["subarray",{"2":{"45":1}}],["subfolder",{"2":{"11":3}}],["subfolders",{"2":{"11":1}}],["subtype",{"2":{"0":1,"101":1}}],["subtypes",{"2":{"0":1,"52":2,"64":2,"67":3,"86":1,"102":2,"179":2,"183":4}}],["suceeding",{"2":{"183":1}}],["succinct",{"2":{"124":1}}],["successfully",{"2":{"52":2,"126":1,"179":1,"181":3}}],["successful",{"2":{"52":5,"88":1,"108":1,"115":1,"179":8,"181":2}}],["success",{"2":{"4":1,"52":7,"88":2,"179":13,"181":9}}],["succeeding",{"2":{"2":1}}],["such",{"2":{"0":1,"52":1,"93":1,"114":1,"140":3,"142":1,"161":1,"179":1,"181":10,"183":2}}],["iobuffer",{"2":{"183":1}}],["io",{"2":{"181":24,"183":17}}],["illustrated",{"2":{"158":1,"159":1}}],["illustrate",{"2":{"157":1,"164":1,"166":1,"173":1}}],["i>macro",{"2":{"181":1}}],["i>method",{"2":{"181":5,"183":1}}],["i>",{"2":{"58":1,"181":6,"183":1}}],["i>function",{"2":{"58":1}}],["iphone",{"2":{"12":4,"35":1,"41":2,"181":7}}],["ignored",{"2":{"181":1}}],["ignores",{"2":{"181":8}}],["ignore",{"2":{"11":4,"174":1}}],["immediate",{"2":{"181":1}}],["immediately",{"2":{"89":1,"152":1,"181":1,"183":2}}],["im",{"2":{"181":2}}],["imagine",{"2":{"11":1,"77":1}}],["image",{"0":{"20":1},"2":{"10":2,"20":7,"43":6,"106":2,"152":7,"153":1,"181":89}}],["images",{"0":{"43":1},"2":{"10":2,"20":1,"42":1,"43":1,"55":2,"104":2,"106":2,"180":2,"181":19}}],["impact",{"2":{"61":1,"120":1,"140":2}}],["impartial",{"2":{"17":1,"121":2,"122":1,"137":1}}],["improper",{"2":{"142":1}}],["improving",{"2":{"14":1,"52":1,"141":1,"142":1,"179":1}}],["improved",{"2":{"130":1,"131":1}}],["improvements",{"2":{"130":2,"131":2,"140":1,"142":1}}],["improvement",{"2":{"71":1,"130":4,"140":1,"141":1}}],["improves",{"2":{"19":1}}],["improve",{"2":{"8":1,"21":1,"51":1,"52":1,"66":2,"71":3,"118":1,"124":1,"125":1,"126":1,"127":1,"128":1,"130":6,"132":1,"141":1,"179":2,"181":2,"183":2}}],["imprints",{"2":{"58":1,"181":1}}],["impermanence",{"2":{"12":1}}],["imported",{"2":{"182":1}}],["imports",{"2":{"52":2,"181":7}}],["important",{"2":{"8":1,"67":2,"114":2,"126":1,"155":2,"157":1,"165":1,"167":1,"181":5,"183":2}}],["import",{"2":{"1":1,"21":1,"24":2,"32":1,"37":1,"48":1,"53":1,"57":1,"59":1,"85":1,"181":2,"183":1}}],["implementing",{"2":{"181":1}}],["implement",{"2":{"51":1,"52":1,"61":1,"179":1,"181":1}}],["implements",{"2":{"49":1,"179":2}}],["implemented",{"2":{"0":1,"106":1,"183":4}}],["implementations",{"2":{"61":1,"64":1}}],["implementation",{"2":{"0":1,"61":1,"86":2,"112":1,"130":1,"131":2,"181":7,"183":12}}],["ie",{"2":{"10":3,"24":1,"52":4,"57":1,"58":2,"59":1,"66":1,"67":2,"74":1,"77":1,"89":2,"90":1,"104":1,"106":3,"108":1,"179":7,"181":19,"183":16}}],["irrelevant",{"2":{"8":1,"118":1}}],["idx",{"2":{"183":3}}],["idiomatic",{"2":{"130":1}}],["id`",{"2":{"52":1,"179":1}}],["ids",{"2":{"7":2,"136":1,"138":1,"181":21,"183":3}}],["id",{
"2":{"7":11,"52":53,"61":1,"67":3,"93":2,"136":1,"179":69,"181":77,"183":19}}],["id=",{"2":{"7":2,"58":1,"181":6,"183":1}}],["idempotent",{"2":{"181":12}}],["identity",{"2":{"181":5,"183":1}}],["identifies",{"2":{"181":2}}],["identified",{"2":{"126":1,"157":1,"183":2}}],["identifiers",{"2":{"67":1,"112":1,"114":1,"181":2,"183":1}}],["identifier",{"2":{"67":1,"112":1,"114":1,"179":1,"181":11,"183":6}}],["identifying",{"2":{"181":2}}],["identify",{"2":{"114":1,"130":1,"141":1,"142":1,"156":1,"157":1}}],["identical",{"2":{"0":1,"57":1,"108":1,"183":1}}],["ideal",{"2":{"165":1,"181":2}}],["ideally",{"2":{"1":1,"2":1,"3":1,"161":2,"181":2}}],["ideas",{"2":{"8":1,"152":1}}],["idea",{"2":{"5":1,"13":1,"181":3}}],["i",{"0":{"70":1,"78":1,"91":1,"92":1},"2":{"2":1,"11":1,"12":5,"13":4,"21":2,"22":1,"23":5,"24":1,"26":5,"30":3,"31":5,"34":2,"35":2,"39":1,"40":1,"41":5,"42":1,"51":2,"52":3,"58":2,"61":1,"87":4,"88":2,"90":1,"92":5,"107":1,"108":7,"115":1,"130":2,"131":2,"132":2,"179":10,"181":50,"183":6}}],["if",{"0":{"70":1,"78":1},"2":{"1":1,"2":1,"10":5,"13":1,"16":1,"17":1,"18":1,"19":1,"20":1,"21":2,"22":1,"23":2,"24":4,"26":2,"27":1,"28":1,"32":1,"37":1,"39":1,"42":2,"49":2,"51":3,"52":40,"57":1,"58":8,"60":2,"61":2,"63":1,"65":2,"66":3,"67":21,"73":5,"74":5,"75":2,"77":2,"84":1,"85":1,"86":2,"87":1,"88":6,"89":5,"93":3,"97":1,"98":1,"102":1,"106":4,"108":6,"110":1,"114":2,"115":2,"117":5,"118":6,"120":2,"130":3,"131":1,"137":1,"138":2,"140":2,"141":3,"142":3,"155":4,"156":2,"157":1,"159":1,"163":2,"170":3,"171":1,"172":2,"174":2,"177":1,"178":3,"179":47,"181":228,"183":60}}],["inherit",{"2":{"183":1}}],["inactive",{"2":{"181":2}}],["inactived",{"2":{"181":1}}],["inanimate",{"2":{"35":1}}],["inefficiencies",{"2":{"142":1}}],["inline",{"2":{"130":3}}],["initializes",{"2":{"181":1}}],["initialized",{"2":{"181":11}}],["initialize",{"2":{"181":14}}],["initialisms",{"2":{"126":1}}],["initiate",{"2":{"52":1,"179":1,"181":1}}],["injects",{"2":{"126":1}}],["injected",{"2":{"112":1,"183":1}}],["inject",{"2":{"97":1,"181":1}}],["inverse",{"2":{"183":2}}],["investigating",{"2":{"183":3}}],["investigate",{"2":{"61":1}}],["involve",{"2":{"181":2}}],["involved",{"2":{"0":1}}],["invalid",{"2":{"52":2,"67":2,"181":2,"183":2}}],["infinitely",{"2":{"181":2}}],["inferred",{"2":{"181":1}}],["inferfaces",{"2":{"181":1}}],["influential",{"2":{"174":1}}],["influence",{"2":{"52":1,"108":1,"179":1}}],["informal",{"2":{"163":1}}],["informative",{"2":{"155":1,"156":1,"167":1}}],["information",{"2":{"0":2,"6":1,"10":1,"17":1,"19":1,"23":3,"26":3,"29":1,"30":1,"49":1,"52":2,"55":1,"59":1,"63":1,"67":4,"71":1,"74":1,"75":1,"81":1,"83":1,"84":1,"85":1,"90":1,"92":1,"100":1,"103":1,"104":2,"106":1,"108":1,"110":2,"117":2,"118":1,"120":4,"121":1,"122":1,"126":2,"128":1,"144":1,"145":1,"147":1,"155":1,"156":1,"163":1,"171":1,"172":1,"174":1,"179":5,"180":1,"181":69,"183":6}}],["informed",{"2":{"6":1}}],["info",{"2":{"4":1,"7":2,"10":1,"11":2,"20":2,"22":1,"23":1,"26":1,"30":1,"31":1,"52":10,"58":2,"61":1,"87":2,"88":2,"97":2,"98":1,"106":1,"108":1,"179":10,"181":13}}],["inplace",{"2":{"179":6}}],["inplace=true",{"2":{"52":1,"179":1}}],["input=",{"2":{"108":2}}],["input2",{"2":{"58":3,"181":3}}],["input1",{"2":{"58":3,"181":3}}],["inputclassifier",{"0":{"136":1},"2":{"18":1,"88":1,"181":3}}],["inputs",{"2":{"10":2,"21":1,"49":3,"51":1,"89":1,"100":1,"102":1,"105":1,"106":3,"108":1,"170":1,"178":1,"181":6,"183":3}}],["input",{"2":{"10":2,"18":4,"67":1,"88":2,"106":2,"108":3,"127":1,"136":7,"138":3,"174":1,"181":23,"183":18}}],["in
ches",{"2":{"181":1}}],["incredible",{"2":{"155":1}}],["increase",{"2":{"49":1,"76":1,"179":1,"181":3}}],["incorporating",{"2":{"183":2}}],["incorrect",{"2":{"142":1}}],["inconsistencies",{"2":{"141":1}}],["inconsistent",{"2":{"121":1}}],["incomplete",{"2":{"121":1,"181":3}}],["including",{"2":{"10":1,"17":1,"25":1,"49":1,"84":1,"108":1,"174":1,"179":2,"181":20,"183":3}}],["includes",{"2":{"7":1,"67":2,"83":1,"179":2,"181":1,"183":6}}],["included",{"2":{"7":1,"131":1,"156":1,"181":5,"182":1,"183":2}}],["include",{"2":{"2":1,"7":1,"10":1,"55":9,"61":2,"67":1,"106":1,"124":1,"125":1,"126":1,"142":1,"155":2,"156":1,"170":1,"178":1,"180":9,"181":14,"183":9}}],["indentation",{"2":{"183":1}}],["independent",{"2":{"179":1}}],["index>",{"2":{"183":1}}],["indexing",{"2":{"67":1,"171":1,"172":1,"183":1}}],["indexes",{"2":{"183":7}}],["indexed",{"2":{"66":2,"67":1,"183":1}}],["indexer",{"2":{"64":1,"67":12,"183":13}}],["index",{"2":{"2":16,"3":1,"4":2,"6":1,"7":2,"8":3,"10":1,"60":5,"61":11,"63":5,"64":5,"65":2,"66":5,"67":54,"181":2,"183":205}}],["industry",{"2":{"126":1}}],["indifferent",{"2":{"58":1,"181":1}}],["individual",{"2":{"52":2,"75":1,"121":1,"179":2,"181":1,"183":2}}],["indication",{"2":{"157":1}}],["indicating",{"2":{"67":1,"138":2,"179":1,"181":21,"183":13}}],["indicate",{"2":{"140":1,"142":1,"179":2}}],["indicated",{"2":{"112":1}}],["indicates",{"2":{"52":3,"174":1,"179":4,"181":1}}],["indices",{"2":{"8":1,"67":1,"181":2,"183":23}}],["inserting",{"2":{"183":1}}],["inserted",{"2":{"181":2}}],["insert",{"2":{"181":1}}],["insufficient",{"2":{"75":1}}],["inside",{"2":{"52":2,"181":3}}],["insights",{"2":{"6":1,"155":5}}],["inspired",{"2":{"21":1,"52":1,"124":1,"125":1,"174":1,"179":1,"181":3}}],["inspect",{"2":{"13":1,"52":2,"179":1,"181":5}}],["instructor",{"2":{"181":3}}],["instruction",{"2":{"181":1,"183":3}}],["instructions>",{"2":{"178":4}}],["instructions=",{"2":{"67":1,"114":1,"115":1,"120":1,"155":1,"156":1,"157":1,"170":1,"172":1,"174":1,"178":1,"183":2}}],["instructions",{"2":{"4":1,"36":1,"67":2,"103":1,"105":1,"110":1,"114":9,"115":5,"117":1,"118":1,"120":9,"121":1,"130":3,"136":1,"138":1,"140":6,"141":1,"142":2,"144":1,"145":1,"147":1,"152":1,"155":7,"156":6,"157":3,"159":4,"161":2,"163":2,"167":1,"170":7,"171":4,"172":6,"174":9,"178":6,"181":1,"183":2}}],["instruct",{"2":{"28":1,"108":1}}],["installation",{"0":{"96":1},"2":{"84":1}}],["installated",{"2":{"37":1}}],["installing",{"2":{"52":1,"181":2}}],["installed",{"2":{"22":1,"84":1,"96":2}}],["instant",{"0":{"82":1}}],["instantiating",{"2":{"183":1}}],["instantiation",{"2":{"52":2,"181":2}}],["instantiated",{"2":{"10":1,"49":1,"106":1,"181":1}}],["instances",{"2":{"181":2}}],["instance",{"2":{"10":2,"19":1,"49":2,"52":2,"63":1,"106":2,"174":3,"179":18,"181":5}}],["instead",{"2":{"15":1,"24":1,"58":1,"90":1,"130":1,"131":1,"174":1,"181":8,"183":2}}],["innerjoin",{"2":{"7":2}}],["inner",{"2":{"6":1,"7":5,"93":1}}],["int32",{"2":{"181":1}}],["intricate",{"2":{"181":5}}],["intro",{"2":{"58":2}}],["introduced",{"2":{"42":1}}],["introduction",{"0":{"48":1,"53":1,"59":1},"1":{"49":1,"50":1,"51":1,"52":1,"54":1,"55":1,"60":1,"61":1,"62":1,"63":1,"64":1,"65":1,"66":1,"67":1},"2":{"5":1,"131":1}}],["int=60",{"2":{"183":1}}],["int=3",{"2":{"183":1}}],["int=32000",{"2":{"179":1}}],["int=35000",{"2":{"58":4,"181":4}}],["int=1",{"2":{"179":1}}],["int=512",{"2":{"52":1,"179":1}}],["int64",{"2":{"7":3,"13":1,"24":1,"45":1,"61":4,"89":1,"181":3}}],["int",{"2":{"7":1,"19":2,"52":18,"58":2,"67":2,"88":5,"179":31,"181":49,"183":38}}],["into",{"
0":{"11":1},"2":{"2":3,"10":1,"17":1,"18":1,"35":1,"45":1,"52":1,"57":6,"58":6,"59":1,"61":1,"66":1,"67":7,"82":1,"89":1,"92":2,"96":1,"106":1,"108":4,"126":1,"128":1,"155":2,"156":2,"157":2,"179":2,"181":32,"183":25}}],["intelligent",{"2":{"112":1}}],["intelligence",{"2":{"16":1}}],["intent",{"2":{"127":1}}],["intention",{"2":{"63":1,"182":1}}],["intended",{"2":{"52":1,"66":2,"125":1,"127":1,"140":4,"179":2,"181":2,"183":1}}],["intends",{"2":{"36":1}}],["integrity",{"2":{"140":1}}],["integrates",{"2":{"61":1,"179":1}}],["integration",{"0":{"11":1},"2":{"0":2,"170":1,"178":1}}],["integer",{"2":{"55":1,"67":10,"88":1,"180":1,"181":15,"183":33}}],["integer=1",{"2":{"52":1,"179":2}}],["integers",{"2":{"7":1,"181":3,"183":1}}],["intersection",{"2":{"183":1}}],["interpolate",{"2":{"181":1}}],["interpolated",{"2":{"181":1}}],["interpolation",{"0":{"40":1},"2":{"97":1,"171":1,"172":1,"181":6}}],["interprets",{"2":{"150":1}}],["interested",{"2":{"108":1,"181":1}}],["interesting",{"2":{"41":1}}],["internally",{"2":{"67":1,"183":1}}],["internal",{"2":{"63":1,"67":1,"86":1,"163":3,"181":1,"183":1}}],["interface",{"0":{"62":1},"1":{"63":1,"64":1,"65":1,"66":1},"2":{"59":1,"66":1,"181":5}}],["intermediate",{"2":{"6":1,"61":2}}],["interaction",{"2":{"179":2}}],["interactions",{"2":{"174":1,"179":5,"181":2}}],["interactive",{"2":{"52":1,"61":1,"179":2}}],["interact",{"2":{"1":1,"10":1,"20":1,"49":1,"52":2,"101":1,"106":1,"179":2}}],["in",{"0":{"2":1,"81":1},"2":{"0":1,"1":2,"2":9,"3":1,"4":2,"6":3,"7":24,"8":4,"10":8,"11":6,"12":1,"13":3,"14":1,"15":3,"17":1,"18":1,"19":4,"20":4,"21":5,"22":2,"23":3,"24":15,"26":2,"28":3,"30":2,"31":2,"32":3,"35":1,"36":1,"37":1,"39":1,"41":1,"42":2,"49":1,"51":3,"52":54,"54":1,"55":4,"56":1,"57":4,"58":7,"60":4,"61":15,"63":6,"65":1,"66":3,"67":23,"69":1,"71":2,"73":6,"74":3,"75":1,"76":1,"77":1,"78":2,"79":4,"80":2,"82":1,"84":2,"86":2,"87":4,"88":4,"89":7,"91":2,"92":1,"93":1,"95":4,"97":4,"98":1,"105":2,"106":8,"107":7,"108":16,"112":2,"114":3,"120":4,"121":3,"124":1,"125":2,"126":4,"127":1,"128":1,"130":7,"131":4,"132":1,"136":1,"138":1,"140":4,"141":2,"142":4,"144":4,"145":3,"147":2,"153":1,"155":2,"156":2,"157":3,"160":1,"161":1,"163":7,"164":3,"166":2,"167":2,"168":1,"169":2,"170":1,"173":3,"176":1,"177":5,"178":3,"179":65,"180":4,"181":268,"182":2,"183":134}}],["itr2",{"2":{"58":2,"181":2}}],["itr1",{"2":{"58":2,"181":2}}],["iters",{"2":{"181":1}}],["iterative",{"2":{"179":1,"181":1}}],["iteratively",{"2":{"58":2,"132":1,"179":2,"181":2}}],["iterating",{"2":{"179":1}}],["iterations",{"2":{"131":1}}],["iteration",{"2":{"58":1,"179":2,"181":1}}],["iterates",{"2":{"181":1}}],["iterate",{"2":{"52":1,"179":1}}],["itemsextract",{"2":{"181":10}}],["items",{"2":{"7":2,"8":1,"63":1,"67":2,"108":1,"114":2,"126":1,"181":13,"183":10}}],["item",{"2":{"4":1,"5":1,"6":1,"7":3,"52":1,"67":1,"179":1,"181":1,"183":11}}],["itself",{"2":{"19":1,"51":1,"58":1,"67":1,"181":4,"183":3}}],["its",{"2":{"10":2,"12":1,"21":2,"24":1,"32":1,"49":3,"51":2,"52":2,"58":4,"61":2,"65":1,"67":4,"85":1,"89":1,"90":1,"93":1,"101":1,"104":1,"106":2,"117":1,"118":1,"120":1,"127":1,"140":2,"141":1,"152":1,"156":3,"167":1,"171":1,"172":1,"174":3,"179":6,"181":12,"183":11}}],["it",{"0":{"77":2,"99":1},"1":{"100":1,"101":1,"102":1,"103":1,"104":1,"105":1,"106":1,"107":1,"108":1},"2":{"0":4,"2":4,"4":2,"5":1,"7":1,"8":2,"10":14,"11":4,"12":4,"13":4,"15":1,"17":1,"18":3,"19":2,"21":8,"22":3,"23":2,"24":11,"26":1,"28":5,"29":3,"30":3,"31":4,"32":3,"36":1,"37":1,"39":1,"40":1,"41":3,"42":4,"43":1,"49":5,"51":6
,"52":33,"54":1,"57":1,"58":14,"59":2,"60":1,"61":2,"63":1,"67":17,"69":1,"72":2,"73":4,"74":3,"75":2,"76":1,"77":2,"78":1,"79":6,"80":1,"84":4,"85":2,"87":4,"88":8,"89":13,"90":1,"91":2,"92":2,"93":2,"95":6,"97":1,"99":3,"100":1,"101":1,"102":3,"103":2,"104":1,"105":4,"106":14,"107":8,"108":17,"117":2,"118":2,"120":1,"121":1,"124":1,"125":2,"126":1,"127":2,"128":2,"130":5,"131":5,"132":1,"136":1,"137":3,"138":2,"140":3,"142":2,"144":1,"150":1,"152":1,"155":3,"157":2,"159":1,"163":3,"165":2,"167":2,"169":1,"170":4,"171":1,"172":1,"174":2,"177":1,"178":4,"179":63,"181":224,"182":2,"183":82}}],["isolate",{"2":{"181":1}}],["istracermessage",{"2":{"181":2}}],["isextracted",{"2":{"181":4}}],["isn",{"2":{"108":1,"117":2,"181":3}}],["isnothing",{"2":{"7":1,"52":1,"88":1,"179":1,"181":1,"183":1}}],["issues",{"2":{"130":2,"142":1}}],["issue",{"2":{"57":1,"60":1,"67":1,"73":1,"130":2,"183":4}}],["islowercase",{"2":{"52":1,"179":1}}],["isvalid",{"2":{"21":1,"51":1,"52":4,"179":1,"181":4}}],["isa",{"2":{"10":4,"52":2,"88":1,"93":3,"106":4,"108":1,"179":2,"181":4}}],["is",{"0":{"77":1,"92":2},"2":{"0":7,"1":2,"2":4,"3":1,"5":2,"6":5,"7":18,"8":1,"10":11,"11":3,"12":1,"13":4,"15":1,"17":4,"19":4,"21":9,"22":2,"24":5,"28":5,"29":1,"30":3,"31":5,"32":1,"35":1,"36":1,"37":2,"39":1,"40":2,"41":4,"42":2,"43":1,"47":1,"48":1,"49":6,"51":8,"52":44,"53":1,"54":1,"55":9,"58":21,"59":2,"60":2,"61":2,"63":4,"65":2,"66":7,"67":53,"71":2,"73":5,"74":3,"75":1,"77":3,"78":2,"79":1,"83":2,"84":2,"86":2,"87":4,"88":5,"89":1,"91":1,"92":2,"93":1,"97":8,"98":5,"99":2,"101":3,"103":1,"104":2,"105":3,"106":13,"107":14,"108":16,"115":1,"117":3,"118":2,"120":3,"121":5,"124":1,"125":1,"126":3,"127":4,"128":2,"130":2,"131":2,"136":1,"137":3,"138":1,"140":1,"141":3,"142":2,"144":1,"145":1,"150":1,"152":1,"155":1,"156":3,"160":1,"161":1,"162":1,"163":3,"164":1,"165":2,"166":1,"167":3,"168":1,"169":3,"170":2,"171":1,"172":1,"173":1,"174":1,"176":2,"177":4,"178":3,"179":66,"180":9,"181":276,"182":5,"183":173}}],["dtm",{"2":{"183":2}}],["dynamically",{"2":{"181":2}}],["dynamic",{"2":{"179":1}}],["duplicates",{"2":{"183":1}}],["duplication",{"2":{"67":1,"183":1}}],["due",{"2":{"52":1,"181":2}}],["during",{"2":{"7":1,"52":3,"179":2,"181":3,"183":1}}],["drawn",{"2":{"183":5}}],["draft",{"2":{"163":1}}],["drafts",{"2":{"163":1}}],["drafteremailbrief",{"0":{"163":1}}],["driven",{"2":{"181":1}}],["drives",{"2":{"179":1}}],["drive",{"2":{"108":1}}],["dry",{"2":{"92":4,"181":26}}],["drops",{"2":{"52":1,"179":1}}],["dr",{"2":{"39":1,"114":2}}],["d",{"2":{"30":1,"31":1,"77":1,"90":1,"181":1,"183":2}}],["dllama",{"2":{"29":3}}],["dspy",{"2":{"21":1,"52":1,"179":1}}],["datetime",{"2":{"181":4}}],["date",{"2":{"114":1,"181":2}}],["dates",{"2":{"114":1}}],["datadeps",{"2":{"183":2}}],["datatype",{"2":{"181":2}}],["data=",{"2":{"181":1}}],["data>",{"2":{"144":4,"145":4,"177":4}}],["datamessage",{"2":{"10":2,"45":2,"46":1,"47":1,"104":1,"106":2,"181":14}}],["dataframerowsourcecontextquestionanswerretrieval",{"2":{"7":1}}],["dataframe",{"2":{"7":11,"13":1,"24":1,"181":2}}],["dataframeswhat",{"2":{"7":1}}],["dataframesmeta",{"2":{"1":1,"2":1}}],["dataframes",{"2":{"1":1,"2":3,"5":1,"13":1,"24":1,"114":2,"181":2,"183":1}}],["dataset",{"2":{"6":1,"91":2,"174":1}}],["database",{"2":{"2":1,"5":2,"6":1,"7":11,"126":2}}],["databricks",{"0":{"29":1,"86":1},"2":{"0":1,"29":9,"67":1,"86":1,"181":16,"183":1}}],["databricksopenaischema",{"2":{"0":1,"29":2,"86":2,"181":3}}],["data",{"0":{"19":1,"71":1},"2":{"0":2,"2":4,"5":5,"6":4,"7":34,"8":2,"10":1,"19":3,"22":
1,"24":6,"45":1,"52":9,"61":4,"63":3,"66":1,"71":7,"84":1,"104":1,"106":1,"108":2,"114":1,"144":6,"145":6,"147":6,"149":1,"162":5,"167":1,"169":6,"171":1,"174":1,"177":4,"179":18,"181":22,"183":3}}],["damaging",{"2":{"90":1}}],["day",{"2":{"77":1}}],["dashboard",{"2":{"76":1,"183":1}}],["dashboards",{"2":{"61":1}}],["dance",{"2":{"58":1,"181":1}}],["danced",{"2":{"58":1,"181":1}}],["dangerous",{"2":{"35":1,"181":1}}],["darkness",{"2":{"35":1}}],["daphodil",{"2":{"18":1,"181":1}}],["dall",{"2":{"10":1,"106":1,"181":5}}],["diagnostics",{"2":{"183":1}}],["diagram",{"0":{"64":1},"1":{"65":1},"2":{"63":1}}],["dimensionality",{"2":{"183":4}}],["dimension",{"2":{"183":10}}],["diligent",{"2":{"162":1}}],["dilemma",{"2":{"12":1}}],["dir",{"2":{"93":2,"181":19}}],["direction",{"2":{"181":3}}],["direct",{"2":{"157":1}}],["directly",{"2":{"52":1,"58":1,"65":2,"67":4,"76":1,"86":1,"89":2,"105":1,"120":2,"125":1,"127":1,"181":8,"183":8}}],["directory",{"2":{"24":3,"93":1,"181":11}}],["diverse",{"2":{"127":1}}],["divisible",{"2":{"183":1}}],["division",{"2":{"58":1,"181":1}}],["divides",{"2":{"66":1}}],["div",{"2":{"58":1,"181":6,"183":1}}],["div>",{"2":{"58":1,"181":6,"183":1}}],["digits",{"2":{"52":2,"88":1,"179":2}}],["digits=1",{"2":{"7":1}}],["didn",{"2":{"179":1}}],["did",{"2":{"24":1,"108":1}}],["disables",{"2":{"181":1}}],["disable",{"2":{"78":1,"181":1}}],["disabled",{"2":{"67":1,"183":1}}],["disk",{"2":{"66":1,"89":2,"105":1,"181":4,"183":1}}],["disney",{"2":{"58":2,"181":2}}],["displayed",{"2":{"181":1}}],["displaysize",{"2":{"181":1,"183":2}}],["display",{"2":{"13":1,"24":1,"61":1,"67":1,"87":1,"181":2,"183":1}}],["dispatching",{"2":{"58":1,"63":1,"181":1,"183":32}}],["dispatches",{"2":{"52":1,"61":1,"67":1,"179":1,"183":1}}],["dispatched",{"2":{"52":1,"179":1}}],["dispatch",{"2":{"13":1,"24":1,"42":1,"58":2,"63":2,"64":4,"65":2,"67":1,"102":1,"105":1,"171":2,"172":2,"179":1,"181":6,"183":3}}],["distinct",{"2":{"156":1,"157":1}}],["distinguished",{"2":{"171":1,"172":1}}],["distinguish",{"2":{"58":1,"181":1}}],["distributed",{"2":{"179":1}}],["distributing",{"2":{"174":1}}],["distributions",{"2":{"173":1}}],["distribution",{"2":{"61":1,"179":2}}],["distract",{"2":{"41":1}}],["distraction",{"2":{"11":1}}],["dist",{"2":{"58":6,"181":6}}],["distances",{"2":{"58":1,"181":1}}],["distance",{"2":{"8":1,"16":2,"57":3,"58":12,"181":15,"183":10}}],["discounted",{"2":{"183":2}}],["discovery",{"2":{"120":1}}],["discovered",{"2":{"120":2}}],["discover",{"2":{"10":1,"41":1,"61":3,"64":1,"67":2,"105":1,"106":1,"179":1,"183":2}}],["discrimination",{"2":{"181":2}}],["discrepancies",{"2":{"140":1}}],["discrete",{"2":{"10":1,"106":1}}],["discussed",{"2":{"156":2}}],["discuss",{"2":{"30":1,"31":1}}],["discussions",{"2":{"156":1}}],["discussion",{"2":{"11":1,"181":1}}],["differs",{"2":{"108":1}}],["differ",{"2":{"10":1,"106":1,"181":1}}],["differences",{"2":{"181":3}}],["difference",{"2":{"6":1,"7":2,"58":1,"106":1,"181":3,"183":1}}],["differently",{"2":{"10":1,"49":1,"88":1,"106":1,"181":1}}],["different",{"2":{"6":1,"7":2,"22":1,"52":4,"57":1,"67":1,"89":1,"130":2,"163":2,"170":1,"171":1,"172":1,"178":1,"179":5,"181":10,"183":8}}],["dict=parameters",{"2":{"183":1}}],["dict=dict",{"2":{"183":1}}],["dicts",{"2":{"181":1}}],["dictates",{"2":{"67":1,"183":3}}],["dictionaries",{"2":{"42":1,"102":1}}],["dictionary",{"2":{"15":1,"52":1,"61":1,"171":1,"172":1,"181":21,"183":1}}],["dict",{"2":{"6":4,"7":2,"61":1,"88":1,"92":3,"93":1,"107":3,"108":9,"181":43,"183":13}}],["doing",{"2":{"181":1}}],["dollar",{"2":{"91":1}}],[
"dolphin",{"2":{"37":1}}],["domluna",{"2":{"183":1}}],["domain",{"2":{"126":1,"141":1}}],["domains",{"2":{"55":4,"180":4,"183":6}}],["dominating",{"2":{"23":1,"26":1}}],["dot",{"2":{"16":1,"181":2}}],["double",{"2":{"11":1,"71":1,"80":1,"171":1,"172":1,"181":1}}],["doewhat",{"2":{"7":1}}],["doe",{"2":{"7":6}}],["doesn",{"2":{"181":2}}],["does",{"0":{"77":1},"2":{"2":1,"7":1,"10":1,"36":1,"41":1,"49":1,"52":2,"67":1,"71":2,"78":3,"88":1,"90":1,"92":1,"106":1,"121":2,"130":1,"131":1,"179":1,"181":16,"183":7}}],["don",{"2":{"6":1,"8":1,"24":1,"37":1,"39":1,"103":1,"108":1,"110":3,"114":1,"115":1,"117":3,"118":3,"120":1,"155":2,"156":1,"157":1,"170":1,"172":1,"174":1,"178":1,"181":9,"183":2}}],["done",{"2":{"2":1,"7":1,"52":1,"61":1,"67":1,"85":1,"107":1,"108":1,"140":3,"141":3,"142":3,"179":2,"181":10,"183":1}}],["downstream",{"2":{"88":1,"97":1}}],["downloads",{"2":{"43":1,"181":6}}],["downloaded",{"2":{"22":1}}],["download",{"2":{"10":1,"24":1,"37":1,"43":2,"84":2,"106":1,"181":10,"183":1}}],["down",{"2":{"2":1,"58":1,"63":1,"131":1,"181":1}}],["do",{"0":{"8":1,"90":1},"2":{"2":1,"6":1,"7":4,"10":1,"11":2,"12":3,"13":3,"19":2,"20":1,"21":2,"23":1,"24":1,"26":1,"34":1,"35":1,"36":1,"41":1,"46":1,"51":2,"52":6,"58":1,"63":2,"72":2,"73":2,"74":1,"78":1,"82":1,"87":1,"88":2,"89":1,"90":1,"95":2,"106":1,"108":6,"112":1,"118":1,"130":2,"131":2,"132":1,"155":3,"156":1,"170":1,"174":1,"178":1,"179":6,"181":36,"183":2}}],["doc9",{"2":{"61":1}}],["doc2",{"2":{"61":1}}],["doc5",{"2":{"61":1}}],["doc15",{"2":{"61":1}}],["doc8",{"2":{"61":1}}],["doc$i",{"2":{"61":1}}],["doc",{"2":{"46":2,"67":4,"181":5,"183":4}}],["doctor1",{"2":{"7":1}}],["doctorwhat",{"2":{"7":2}}],["doctor",{"2":{"7":2}}],["documenttermmatrix",{"2":{"181":1,"183":6}}],["documented",{"2":{"63":1}}],["document",{"0":{"45":1},"2":{"2":1,"7":1,"10":1,"45":1,"64":1,"67":9,"114":1,"181":5,"183":21}}],["documents",{"0":{"46":1},"2":{"2":1,"7":1,"46":1,"58":2,"60":2,"61":2,"67":3,"140":1,"181":6,"183":16}}],["documentation",{"2":{"1":1,"19":1,"32":1,"55":1,"61":1,"67":1,"72":1,"95":1,"102":1,"108":1,"114":1,"124":1,"180":1,"181":10,"183":1}}],["docstring",{"2":{"80":1,"90":1,"108":1,"181":12,"183":1}}],["docstrings",{"2":{"19":1,"63":1,"147":1,"181":1}}],["docs",{"2":{"2":2,"21":1,"24":1,"46":1,"64":1,"67":6,"70":1,"75":2,"108":6,"181":13,"183":33}}],["dplyr",{"2":{"2":3}}],["degrees",{"2":{"181":16}}],["denote",{"2":{"155":1}}],["declaration",{"2":{"181":4}}],["declarations",{"2":{"142":1}}],["decoded",{"2":{"181":1}}],["decodes",{"2":{"181":1}}],["decode",{"2":{"108":1,"181":4}}],["decision",{"2":{"156":9}}],["decisions",{"2":{"6":1,"156":5}}],["decides",{"2":{"183":1}}],["decide",{"2":{"17":1,"18":1,"137":1}}],["deduplicate",{"2":{"67":1,"183":1}}],["dedicated",{"2":{"1":1,"23":1,"26":1,"106":1,"156":1}}],["deviations",{"2":{"142":1}}],["device",{"2":{"41":1}}],["developers",{"2":{"181":1}}],["developing",{"2":{"61":1}}],["development",{"2":{"61":1,"120":1}}],["dev",{"2":{"58":1,"181":1}}],["depot",{"2":{"73":1}}],["depth",{"2":{"55":3,"180":3}}],["depend",{"2":{"181":3}}],["dependencies",{"2":{"24":1,"59":1}}],["depends",{"2":{"13":1}}],["depending",{"2":{"10":1,"67":2,"75":1,"106":1,"167":1,"181":1,"183":2}}],["deem",{"2":{"126":1}}],["deemed",{"2":{"52":1,"181":1}}],["deepseek",{"2":{"181":6}}],["deepseekopenaischema",{"2":{"181":2}}],["deepdive",{"0":{"66":1}}],["deeper",{"2":{"65":2}}],["deep",{"2":{"41":1,"131":1,"132":1,"164":1,"166":1,"173":1,"177":1}}],["democards",{"2":{"58":1,"181":1}}],["demonstrate",{"2":{"52":1,"179":1}}],["d
emanding",{"2":{"28":1}}],["delim",{"2":{"181":2}}],["delicious",{"2":{"31":2,"108":8}}],["dels",{"2":{"66":1}}],["delay=2",{"2":{"52":1,"179":1}}],["delay",{"2":{"21":1,"51":1,"52":5,"179":7,"181":2}}],["delete",{"2":{"2":1,"4":1,"73":1}}],["def2",{"2":{"183":1}}],["def",{"2":{"183":7}}],["defauls",{"2":{"181":1}}],["defaults",{"2":{"52":10,"58":4,"59":1,"63":1,"66":1,"67":19,"179":17,"181":95,"183":30}}],["default",{"0":{"85":1},"2":{"16":1,"37":1,"42":1,"52":2,"55":7,"58":1,"61":3,"63":1,"67":41,"85":2,"97":1,"106":1,"107":1,"108":1,"179":1,"180":7,"181":74,"183":103}}],["defining",{"2":{"63":3}}],["definitions",{"2":{"181":2}}],["definition",{"2":{"52":1,"130":1,"174":7,"181":2}}],["defines",{"2":{"181":5,"183":4}}],["defined",{"0":{"18":1},"2":{"18":1,"52":1,"65":1,"66":1,"104":1,"105":2,"156":1,"165":1,"179":1,"181":22,"183":3}}],["define",{"2":{"2":1,"10":1,"19":2,"24":1,"37":1,"42":1,"52":1,"67":3,"88":2,"106":1,"107":1,"108":2,"140":1,"179":1,"181":16,"183":3}}],["deferring",{"2":{"52":1,"179":1}}],["deferred",{"2":{"10":1,"49":2,"52":2,"106":1,"179":3}}],["defer",{"2":{"17":1}}],["destination",{"2":{"181":1}}],["descending",{"2":{"112":1}}],["descriptive",{"2":{"155":2,"156":1}}],["description=>",{"2":{"181":2}}],["description=sig",{"2":{"108":2}}],["descriptions",{"2":{"18":1,"181":32}}],["description",{"0":{"158":1,"159":1},"1":{"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"171":1,"172":1,"173":1,"174":1,"175":1,"176":1,"177":1,"178":1},"2":{"13":1,"18":1,"24":4,"42":1,"89":2,"108":10,"110":1,"112":1,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"130":1,"131":1,"132":1,"134":1,"136":1,"137":1,"138":1,"140":1,"141":1,"142":1,"144":2,"145":2,"147":1,"149":1,"150":1,"152":1,"153":1,"155":3,"156":1,"157":1,"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"171":1,"172":1,"173":1,"174":5,"176":1,"177":1,"178":1,"181":48}}],["describes",{"2":{"136":1,"138":1,"161":1}}],["described",{"2":{"126":1,"140":1}}],["describe",{"2":{"20":2,"43":1,"155":1,"156":2,"169":1,"171":1,"177":1,"181":6}}],["despite",{"0":{"73":2},"2":{"181":1}}],["desired",{"2":{"57":2,"140":1,"142":1,"179":1}}],["designed",{"2":{"0":1,"10":4,"49":1,"52":2,"59":1,"63":2,"89":1,"106":5,"108":1,"179":2,"181":5}}],["deserialize",{"2":{"2":1,"181":1}}],["debugging",{"2":{"99":1,"181":18,"183":1}}],["debug",{"2":{"2":1,"67":1,"181":1,"183":1}}],["determining",{"2":{"181":1}}],["determines",{"2":{"100":1,"181":1}}],["determine",{"2":{"0":1,"4":1,"7":1,"161":1}}],["detects",{"2":{"181":3}}],["detected",{"2":{"181":1}}],["detect",{"2":{"181":2}}],["detachment",{"2":{"12":2}}],["detail=",{"2":{"181":1}}],["detailorientedtask",{"0":{"162":1}}],["detail",{"2":{"67":1,"80":1,"130":1,"131":1,"144":1,"145":1,"147":1,"162":2,"181":14,"183":1}}],["details",{"2":{"2":1,"10":2,"24":1,"37":1,"49":1,"52":2,"66":1,"67":9,"75":1,"105":2,"106":2,"108":2,"124":1,"125":1,"128":1,"144":1,"145":1,"147":1,"174":1,"179":8,"181":12,"183":30}}],["detailed",{"2":{"0":1,"52":2,"120":1,"140":1,"179":4}}],["aaazzam",{"2":{"181":1}}],["aai",{"2":{"11":2,"98":3,"181":11}}],["axolotl",{"2":{"91":1}}],["azureopenaischema",{"2":{"181":4}}],["azure",{"0":{"86":1},"2":{"181":17}}],["a>",{"2":{"58":1,"181":6,"183":1}}],["away",{"2":{"52":1,"58":1,"73":1,"179":1,"181":1}}],["awareness",{"2":{"67":1,"183":2}}],["aware",{"2":{"49":1,"57":1,"60":1}}],["acronyms",{"2":{"126":1}}],["across",{"2":{"52":1,"65":2,"67":2,"80":1,"108":1,"179":
2,"181":2,"183":5}}],["achievable",{"2":{"58":1,"181":1}}],["achieve",{"2":{"34":1,"58":3,"82":1,"88":1,"179":1,"181":3}}],["acts",{"2":{"181":1}}],["action",{"2":{"156":1,"179":2}}],["actionable",{"2":{"140":1,"141":1,"142":1,"150":1,"156":1}}],["active",{"2":{"52":13,"179":17}}],["act",{"2":{"87":1,"92":1,"157":1,"181":3}}],["actually",{"2":{"28":1,"36":2,"130":1,"131":1,"174":1,"181":1}}],["actual",{"2":{"24":2,"52":1,"130":1,"131":1,"132":1,"174":1,"179":2}}],["accumulate",{"2":{"183":1}}],["accuracy",{"2":{"61":1,"140":1,"144":1,"145":1,"147":1,"183":5}}],["accurately",{"2":{"142":1,"150":1,"153":1,"161":1}}],["accurate",{"2":{"23":1,"26":1,"181":1}}],["account",{"2":{"72":2,"74":1,"75":2,"76":1,"78":1,"95":3}}],["according",{"2":{"58":1,"181":2}}],["accesor",{"2":{"183":1}}],["accesses",{"2":{"181":1}}],["accessed",{"2":{"89":1,"181":1}}],["accessible",{"2":{"167":1,"181":1}}],["accessing",{"2":{"72":1,"181":11}}],["accessors",{"2":{"183":1}}],["accessor",{"2":{"52":4,"179":2,"181":2}}],["access",{"0":{"70":1,"82":1},"2":{"21":1,"28":1,"31":1,"45":1,"48":1,"49":1,"51":1,"52":3,"59":1,"73":1,"78":2,"82":2,"84":1,"86":1,"88":1,"95":1,"100":1,"101":1,"110":1,"117":1,"118":1,"179":5,"181":25,"183":7}}],["accepts",{"2":{"52":3,"108":1,"179":4}}],["accept",{"2":{"52":3,"179":4,"183":1}}],["affection",{"2":{"181":1}}],["affects",{"2":{"8":1}}],["after",{"2":{"24":1,"52":5,"58":3,"67":2,"74":1,"88":1,"89":1,"97":1,"155":1,"156":1,"170":1,"178":1,"179":3,"181":11,"183":4}}],["amount",{"2":{"181":1}}],["among",{"2":{"174":1}}],["ambiguities",{"2":{"141":1}}],["amazing",{"2":{"102":1}}],["amazingly",{"2":{"22":1}}],["am",{"2":{"31":1,"41":1,"108":1,"181":6}}],["amp",{"0":{"4":1,"5":1,"6":1},"2":{"2":1,"3":1,"6":1,"7":1,"67":4,"183":4}}],["ah",{"2":{"12":1,"181":1}}],["administrator",{"2":{"173":1}}],["adhere",{"2":{"181":3}}],["adheres",{"2":{"142":1}}],["adherence",{"2":{"140":2,"144":1,"145":1,"147":1}}],["adapted",{"2":{"117":1,"118":1,"125":1,"127":1,"128":1}}],["adapt",{"2":{"91":1}}],["advisable",{"2":{"183":1}}],["advice",{"2":{"37":1,"58":2,"181":2}}],["advantages",{"2":{"61":1}}],["advancements",{"2":{"61":1}}],["advance",{"2":{"52":1,"179":1}}],["advancedgenerator",{"2":{"67":1,"181":1,"183":3}}],["advancedretriever",{"2":{"65":3,"67":4,"181":1,"183":7}}],["advanced",{"0":{"12":1,"35":1,"41":1},"2":{"48":1,"55":2,"67":3,"99":1,"114":1,"180":2,"181":1,"183":4}}],["adjectives",{"2":{"31":2,"108":8}}],["adjustments",{"2":{"140":2,"142":1}}],["adjusts",{"2":{"39":1,"41":1,"181":1}}],["adjust",{"2":{"24":1,"49":1,"174":1}}],["addresses",{"2":{"142":1}}],["addressed",{"2":{"130":1,"141":1}}],["address",{"2":{"130":1,"140":1,"174":1,"181":5}}],["addded",{"2":{"67":1,"183":1}}],["adding",{"2":{"57":1,"67":1,"78":1,"86":1,"108":1,"142":1,"156":1,"181":3,"183":1}}],["additionalproperties",{"2":{"181":1}}],["additional",{"2":{"52":1,"54":1,"63":1,"67":13,"97":1,"117":1,"118":1,"126":2,"155":1,"179":4,"181":46,"183":22}}],["addition",{"2":{"10":2,"18":1,"31":1,"63":1,"67":1,"106":1,"170":4,"178":4,"181":2,"183":2}}],["added",{"2":{"24":1,"37":1,"52":1,"58":2,"67":1,"179":1,"181":5,"183":5}}],["adds",{"2":{"11":1,"179":1,"181":2,"183":3}}],["add",{"2":{"8":3,"11":1,"13":2,"20":1,"24":7,"32":1,"39":1,"42":1,"47":1,"51":1,"52":3,"67":9,"78":1,"79":1,"82":2,"86":1,"89":2,"92":1,"96":1,"108":2,"126":1,"155":1,"179":9,"181":18,"183":34}}],["agreements",{"2":{"163":1}}],["agreed",{"2":{"156":2}}],["agnostic",{"2":{"92":1}}],["agents",{"2":{"48":1,"183":3}}],["agentic",{"2":{"48":1,"52":1,"106":1,"179":2,
"181":2,"182":1}}],["agent",{"0":{"21":1,"48":1},"1":{"49":1,"50":1,"51":1,"52":1},"2":{"21":1,"52":1,"179":2,"181":1}}],["agenttools",{"0":{"179":1},"2":{"10":3,"21":1,"48":3,"52":6,"57":1,"88":1,"106":2,"179":69,"181":35,"182":2}}],["age",{"2":{"19":2,"181":13}}],["against",{"2":{"61":1,"67":1,"181":1,"183":1}}],["again",{"2":{"11":1,"89":1,"181":4}}],["auditing",{"2":{"181":2}}],["audience",{"2":{"140":5,"165":5,"167":2,"174":2}}],["authorization",{"2":{"181":2}}],["authentication",{"2":{"181":3}}],["auth",{"2":{"181":2}}],["auto",{"2":{"52":5,"140":1,"141":1,"142":1,"179":1,"181":17}}],["automatically",{"2":{"17":1,"24":1,"49":2,"52":2,"60":1,"61":1,"66":1,"73":1,"79":1,"93":6,"108":3,"181":16,"183":1}}],["automatic",{"0":{"51":1,"93":1},"2":{"10":1,"49":1,"52":1,"78":1,"106":1,"108":1,"181":1}}],["augment",{"2":{"126":1}}],["augmented",{"0":{"1":1},"1":{"2":1},"2":{"1":1,"59":1,"67":2,"182":1,"183":3}}],["avg",{"2":{"7":2}}],["average",{"2":{"7":1,"67":1,"121":1,"183":3}}],["available",{"2":{"6":1,"10":1,"13":2,"23":2,"24":4,"26":2,"32":2,"64":1,"66":2,"67":4,"73":1,"84":2,"86":1,"105":1,"106":1,"108":1,"136":1,"138":1,"155":1,"181":24,"183":7}}],["avoiding",{"2":{"61":1,"120":1}}],["avoided",{"2":{"11":1}}],["avoid",{"2":{"2":1,"42":1,"52":2,"58":1,"67":1,"74":1,"98":1,"130":1,"131":1,"171":1,"172":1,"181":3,"183":5}}],["april",{"2":{"15":1}}],["apostrophes",{"2":{"183":1}}],["apos",{"2":{"7":12}}],["appends",{"2":{"181":3}}],["append",{"2":{"179":1}}],["appended",{"2":{"52":1,"181":2,"183":1}}],["approximates",{"2":{"179":2}}],["appropriate",{"2":{"136":1,"138":3,"150":1,"181":1}}],["approach>",{"2":{"177":4}}],["approach",{"2":{"52":1,"106":1,"124":1,"169":4,"171":2,"172":1,"177":4,"179":1,"183":3}}],["appreciate",{"2":{"12":1}}],["applying",{"2":{"179":1,"183":1}}],["apply",{"2":{"58":1,"67":1,"144":1,"181":4,"183":2}}],["apples",{"2":{"181":2}}],["apple",{"2":{"31":2,"37":1,"108":8,"181":1}}],["applicable",{"2":{"67":1,"157":1,"183":2}}],["applications",{"2":{"10":1,"59":3,"60":1,"61":1,"67":1,"110":1,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"127":1,"128":1,"166":1,"170":1,"178":1,"183":1}}],["application",{"2":{"2":1,"61":1,"100":1,"101":1}}],["applied",{"2":{"10":1,"49":1,"52":1,"58":1,"60":1,"106":1,"179":1,"181":15,"183":1}}],["applies",{"2":{"2":1,"58":1,"66":1,"179":2,"181":1}}],["app",{"2":{"4":1,"84":1,"170":1,"178":1}}],["apikey",{"0":{"73":1}}],["apitools",{"0":{"53":1,"180":1},"1":{"54":1,"55":1},"2":{"53":2,"55":1,"180":4,"181":2,"182":1}}],["apis",{"0":{"23":1,"25":1,"27":1},"1":{"26":1,"27":1,"28":1,"29":1,"30":1,"31":1},"2":{"23":1,"36":1,"53":1,"69":1,"70":1,"77":1,"100":1,"103":1,"182":1}}],["api",{"0":{"23":1,"72":1,"73":2,"79":1,"80":1,"81":2,"86":1,"92":1,"101":1},"2":{"0":7,"2":1,"7":1,"8":1,"10":2,"20":1,"21":3,"23":10,"25":1,"26":4,"27":8,"28":1,"29":5,"30":2,"31":2,"32":4,"37":1,"42":1,"51":3,"52":8,"54":2,"55":5,"59":1,"65":5,"67":34,"71":3,"72":3,"73":5,"75":2,"76":1,"78":4,"79":10,"80":4,"81":1,"84":1,"86":3,"92":2,"93":3,"95":9,"100":2,"101":3,"102":2,"106":4,"107":7,"108":7,"179":10,"180":11,"181":354,"182":1,"183":69}}],["abbreviations",{"2":{"126":1}}],["ab",{"2":{"58":1,"181":2}}],["abcabc",{"2":{"58":1,"181":1}}],["abc",{"2":{"52":3,"58":7,"106":2,"179":3,"181":9,"183":7}}],["ability",{"2":{"31":1,"117":1,"118":1}}],["abilities",{"2":{"23":1,"26":1}}],["about",{"2":{"13":2,"24":1,"37":1,"49":1,"67":1,"88":1,"92":2,"93":2,"104":1,"107":1,"140":1,"164":1,"165":1,"166":1,"167":2,"168":1,"170":1,"171":1,"172":1,"173":1,"1
76":1,"178":1,"181":19,"183":4}}],["above",{"2":{"10":1,"13":2,"15":1,"17":1,"28":1,"57":1,"60":1,"67":1,"77":1,"106":2,"107":1,"112":1,"121":1,"130":1,"131":1,"132":1,"155":1,"163":1,"171":1,"172":1,"174":1,"179":1,"181":5,"183":2}}],["abs",{"2":{"52":1,"179":1}}],["absence",{"2":{"12":1}}],["abstractfloat",{"2":{"183":4}}],["abstractextracteddata",{"2":{"181":1}}],["abstractembedder",{"2":{"64":2,"67":2,"183":10}}],["abstractdocumenttermmatrix",{"2":{"183":1}}],["abstractdocumentindex",{"2":{"67":4,"183":17}}],["abstractdict",{"2":{"181":4}}],["abstractdatamessage",{"2":{"181":1}}],["abstractprocessor=keywordsprocessor",{"2":{"183":1}}],["abstractprocessor",{"2":{"67":2,"183":7}}],["abstractpromptschema",{"2":{"0":1,"52":1,"100":1,"101":1,"102":2,"179":1,"181":6}}],["abstractpostprocessor",{"2":{"64":1,"67":2,"183":4}}],["abstractstreamflavor",{"2":{"181":1}}],["abstractstreamcallback",{"2":{"181":4}}],["abstractstring=",{"2":{"52":4,"58":2,"181":8,"183":1}}],["abstractstring",{"2":{"10":2,"52":6,"55":8,"58":15,"64":3,"67":13,"106":2,"179":4,"180":10,"181":118,"183":89}}],["abstractsharegptschema",{"2":{"181":1}}],["abstractscoringmethod",{"2":{"179":4}}],["abstractsimilarityfinder",{"2":{"64":1,"67":1,"183":11}}],["abstractgenerationmethod",{"2":{"183":1}}],["abstractgenerator",{"2":{"64":3,"67":4,"181":1,"183":8}}],["abstractgoogleschema",{"2":{"0":1,"181":2}}],["abstracttracer",{"2":{"181":1}}],["abstracttracermessage",{"2":{"181":2}}],["abstracttracerschema",{"2":{"181":14}}],["abstracttrees",{"2":{"52":1,"179":1,"183":1}}],["abstracttool",{"2":{"181":11}}],["abstracttagfilter",{"2":{"64":1,"67":1,"183":8}}],["abstracttagger",{"2":{"64":2,"67":4,"183":12}}],["abstractragconfig",{"2":{"64":3,"67":2,"183":3}}],["abstractragresult",{"2":{"63":2,"64":4,"66":1,"67":4,"183":14}}],["abstractretrievalmethod",{"2":{"183":1}}],["abstractretriever",{"2":{"63":2,"64":2,"67":4,"181":1,"183":9}}],["abstractrefiner",{"2":{"64":1,"67":4,"183":8}}],["abstractrephraser",{"2":{"64":1,"67":2,"183":9}}],["abstractreranker",{"2":{"63":1,"64":2,"67":2,"183":10}}],["abstractindexbuilder",{"2":{"64":2,"67":2,"181":1,"183":7}}],["abstractindex",{"2":{"63":1}}],["abstractcodeblock",{"2":{"181":1}}],["abstractcodeoutcome",{"2":{"52":2,"179":2}}],["abstractcontextbuilder",{"2":{"64":1,"67":2,"183":4}}],["abstractchunker",{"2":{"64":1,"67":2,"183":7}}],["abstractchunkindex",{"2":{"64":4,"67":1,"181":1,"183":17}}],["abstractchar",{"2":{"58":1,"181":1}}],["abstractchatmessage",{"2":{"24":2,"89":1,"107":1,"181":3}}],["abstractcandidatechunks",{"2":{"63":1,"181":1,"183":6}}],["abstractvector",{"2":{"52":1,"55":2,"58":1,"67":6,"179":4,"180":2,"181":50,"183":54}}],["abstractmatrix",{"2":{"183":13}}],["abstractmanagedschema",{"2":{"0":1,"181":2}}],["abstractmultiindex",{"2":{"181":1,"183":2}}],["abstractmessage",{"2":{"52":2,"61":1,"87":1,"88":1,"92":2,"103":1,"104":1,"179":6,"181":110,"183":1}}],["abstractannotationstyler",{"2":{"183":4}}],["abstractannotatednode",{"2":{"183":5}}],["abstractanswerer",{"2":{"64":1,"67":2,"183":4}}],["abstractanthropicschema",{"2":{"0":1,"181":8}}],["abstractarray",{"2":{"10":1,"106":1}}],["abstractoutcomes",{"2":{"179":1}}],["abstractollamamanagedschema",{"2":{"0":1,"181":6}}],["abstractollamaschema",{"2":{"0":1,"181":3}}],["abstractopenaischema",{"2":{"0":9,"86":2,"101":1,"102":1,"181":13}}],["abstract",{"2":{"0":1,"66":1,"67":2,"88":1,"108":2,"171":1,"172":1,"181":1,"183":8}}],["able",{"2":{"2":1,"179":1}}],["arxiv",{"2":{"183":1}}],["architectures",{"2":{"181":2}}],["arches",{"2":{"66
":1}}],["arr",{"2":{"89":2,"181":2,"183":3}}],["arrays",{"2":{"171":1,"172":1,"181":1}}],["array",{"2":{"13":1,"22":1,"24":1,"45":1,"58":3,"61":3,"89":1,"181":8,"183":5}}],["arbitrary",{"2":{"88":1,"181":3}}],["art",{"2":{"69":1}}],["artificial",{"2":{"16":1}}],["articles",{"2":{"61":1,"181":2}}],["article",{"2":{"1":1}}],["arg2",{"2":{"181":1}}],["arg1",{"2":{"181":1}}],["arg",{"2":{"181":8}}],["argmin",{"2":{"58":2,"181":2}}],["argmax",{"2":{"58":1,"181":1}}],["args",{"2":{"52":5,"179":11,"181":7,"183":1}}],["argumenterror",{"0":{"73":2}}],["arguments",{"0":{"65":1,"81":1},"2":{"10":4,"12":1,"19":1,"21":1,"49":2,"52":10,"55":1,"58":5,"63":2,"65":2,"67":17,"98":1,"106":4,"108":2,"144":1,"145":1,"147":1,"149":1,"179":27,"180":1,"181":108,"183":39}}],["argument",{"2":{"6":1,"7":8,"10":2,"15":1,"21":2,"23":1,"24":1,"26":1,"39":1,"45":1,"51":2,"52":4,"58":1,"61":2,"63":2,"74":1,"87":2,"89":1,"106":2,"144":1,"145":1,"147":1,"179":5,"181":16,"183":1}}],["around",{"2":{"10":1,"58":1,"63":1,"67":1,"98":1,"102":1,"106":1,"174":1,"179":1,"181":5,"183":5}}],["areas",{"2":{"130":1}}],["are",{"2":{"0":1,"5":1,"6":1,"7":8,"10":1,"11":1,"12":1,"13":3,"17":1,"19":1,"21":1,"23":5,"24":12,"26":2,"27":3,"36":2,"37":1,"41":1,"49":3,"51":1,"52":12,"57":4,"58":6,"60":3,"61":4,"63":4,"64":1,"66":2,"67":11,"69":1,"70":1,"75":1,"78":1,"84":1,"85":1,"86":1,"88":2,"89":1,"92":1,"93":1,"100":2,"102":2,"103":2,"104":2,"105":2,"106":2,"107":4,"108":2,"112":2,"118":3,"120":1,"125":1,"126":1,"127":1,"128":1,"130":4,"136":1,"137":1,"138":3,"140":1,"142":2,"144":1,"145":1,"147":1,"150":1,"153":1,"155":1,"156":2,"159":1,"160":1,"162":2,"163":1,"164":2,"165":1,"166":2,"167":2,"168":1,"169":1,"170":3,"171":1,"172":1,"173":2,"174":4,"176":1,"177":1,"178":3,"179":14,"181":81,"183":41}}],["atop",{"2":{"181":1}}],["atomic",{"2":{"67":10,"183":29}}],["ate",{"2":{"31":1,"108":3,"181":1}}],["attribute",{"2":{"183":2}}],["attract",{"2":{"127":1}}],["attempted",{"2":{"52":1,"179":3}}],["attempts",{"2":{"49":1,"52":2,"88":1,"179":3}}],["attempt",{"2":{"21":1,"51":1,"52":2,"179":2}}],["attach",{"2":{"181":5}}],["attached",{"2":{"12":1,"35":1,"41":1,"181":2}}],["attachments",{"2":{"12":1}}],["attachment",{"2":{"12":1,"35":1,"181":3}}],["at",{"2":{"1":1,"6":1,"7":1,"21":2,"23":1,"27":1,"31":1,"32":1,"34":1,"36":1,"43":1,"48":1,"51":2,"52":5,"54":1,"63":1,"66":1,"67":4,"69":1,"71":1,"74":1,"85":1,"100":1,"105":1,"107":1,"115":2,"121":2,"157":1,"179":12,"181":23,"183":11}}],["aspect",{"2":{"152":1}}],["aspects",{"2":{"140":1,"141":1,"142":1,"170":1,"178":1}}],["as=",{"2":{"89":2,"105":1,"181":1}}],["assesses",{"2":{"183":2}}],["assess",{"2":{"140":1}}],["assertion",{"2":{"181":1}}],["assertions",{"2":{"21":1,"52":1,"179":1}}],["assert",{"2":{"52":1,"108":2,"179":1,"183":2}}],["assigning",{"2":{"174":1}}],["assign",{"2":{"114":1,"121":1}}],["assistance",{"2":{"23":1,"26":1,"42":1}}],["assistant",{"2":{"12":1,"34":1,"87":1,"92":1,"93":1,"105":1,"107":2,"110":1,"112":1,"117":1,"118":1,"121":2,"122":1,"124":1,"126":1,"128":1,"140":3,"141":7,"142":2,"150":1,"160":2,"161":1,"162":1,"181":22}}],["assistantask",{"0":{"160":1},"2":{"10":1,"105":4,"106":1,"107":3}}],["assist",{"2":{"22":1,"23":1,"26":1,"30":2,"31":1,"34":1,"87":1,"181":10}}],["associated",{"2":{"85":1,"107":1,"156":1,"181":5,"183":1}}],["assuming",{"2":{"67":1,"75":1,"77":1,"181":1,"183":3}}],["assumed",{"2":{"179":1,"181":1,"183":4}}],["assumes",{"2":{"37":1,"67":1,"181":5,"183":5}}],["assume",{"2":{"22":1,"74":1,"96":1,"179":1,"183":2}}],["asterisk",{"2":{"11":1}}],["asynchronous",
{"0":{"14":1},"2":{"98":1,"181":5}}],["asyncmap",{"2":{"7":1,"14":3,"46":2,"67":1,"74":3,"98":1,"183":4}}],["async",{"2":{"7":1,"46":1,"181":1}}],["asks",{"2":{"132":1,"163":2}}],["ask=",{"2":{"52":1,"106":2,"107":2,"179":1}}],["asked",{"0":{"68":1},"1":{"69":1,"70":1,"71":1,"72":1,"73":1,"74":1,"75":1,"76":1,"77":1,"78":1,"79":1,"80":1,"81":1,"82":1,"83":1,"84":1,"85":1,"86":1,"87":1,"88":1,"89":1,"90":1,"91":1,"92":1,"93":1},"2":{"37":1,"183":1}}],["asking",{"2":{"13":1,"24":2,"160":1,"164":1,"166":1,"168":1,"173":1,"176":1,"181":2}}],["ask",{"2":{"2":1,"13":4,"21":2,"23":1,"24":8,"26":1,"31":1,"34":1,"42":1,"51":2,"52":2,"57":1,"60":1,"105":3,"107":7,"131":1,"160":3,"164":3,"166":3,"168":3,"173":3,"176":3,"179":2,"181":6,"183":2}}],["as",{"2":{"0":3,"2":1,"6":1,"7":4,"10":9,"11":1,"15":1,"16":1,"17":1,"18":1,"20":1,"21":2,"23":5,"24":5,"26":2,"27":2,"29":3,"30":2,"31":4,"32":1,"34":1,"39":1,"42":2,"43":2,"47":1,"48":1,"49":2,"51":3,"52":13,"53":1,"54":1,"58":15,"59":1,"61":2,"63":3,"66":1,"67":1,"74":1,"78":2,"79":3,"87":4,"88":4,"89":5,"90":1,"92":3,"93":1,"95":1,"98":2,"101":1,"104":1,"105":2,"106":7,"107":2,"108":12,"110":1,"114":1,"117":2,"118":1,"121":1,"124":2,"125":2,"130":3,"140":6,"141":1,"142":2,"144":3,"145":3,"147":3,"153":2,"155":6,"156":4,"157":3,"161":2,"163":4,"165":2,"167":1,"170":3,"174":1,"178":3,"179":28,"181":133,"183":34}}],["a",{"0":{"1":1,"4":1,"5":1,"6":1,"89":1,"90":1,"91":1},"1":{"2":1},"2":{"0":3,"1":2,"2":6,"3":3,"4":1,"5":2,"6":9,"7":16,"8":2,"10":14,"11":6,"12":7,"13":7,"15":3,"16":1,"17":4,"18":3,"19":6,"20":5,"21":6,"22":1,"23":3,"24":23,"25":1,"26":3,"28":2,"29":1,"30":3,"31":4,"32":1,"34":2,"35":2,"36":2,"37":1,"39":1,"40":1,"41":5,"42":3,"43":1,"45":1,"47":1,"48":1,"49":12,"51":3,"52":47,"55":2,"56":2,"57":12,"58":35,"59":2,"60":6,"61":8,"63":6,"66":6,"67":42,"73":2,"74":5,"75":1,"76":4,"77":8,"78":1,"79":5,"84":3,"86":2,"87":5,"88":14,"89":11,"91":7,"92":3,"93":4,"95":1,"97":2,"100":1,"101":2,"102":1,"103":4,"104":1,"105":8,"106":14,"107":6,"108":24,"110":1,"114":2,"117":1,"118":1,"120":4,"121":4,"122":4,"124":3,"125":3,"126":4,"127":1,"128":4,"130":4,"131":1,"132":1,"136":2,"138":2,"140":7,"141":7,"142":5,"144":2,"145":2,"147":2,"150":5,"152":4,"153":1,"155":9,"156":6,"157":3,"160":1,"161":7,"162":2,"163":5,"164":1,"165":3,"166":1,"167":5,"168":1,"169":1,"170":8,"171":2,"172":3,"173":1,"174":6,"176":1,"177":2,"178":8,"179":85,"180":2,"181":625,"183":216}}],["al",{"2":{"183":3}}],["algorithm",{"2":{"183":5}}],["algorithms",{"2":{"52":1,"61":1,"179":2}}],["alignment",{"2":{"144":1,"145":1,"147":1}}],["aligns",{"2":{"140":1,"181":2,"183":2}}],["aligned",{"2":{"125":1,"127":1,"183":1}}],["align",{"2":{"121":1,"140":1,"142":1,"181":7,"183":4}}],["aliased",{"2":{"31":1}}],["aliases",{"0":{"15":1},"2":{"15":5,"29":1,"37":1,"42":1,"78":1,"86":2,"181":36}}],["alias",{"2":{"7":1,"29":1,"30":1,"31":1,"33":1,"42":1,"108":2,"181":36,"183":1}}],["alexander",{"2":{"120":3}}],["almost",{"2":{"63":1}}],["alternative",{"2":{"126":1,"181":7}}],["alternatives",{"0":{"83":1},"2":{"70":1}}],["alternatively",{"2":{"24":1,"52":1,"84":1,"95":1,"179":2,"181":3}}],["alter",{"2":{"52":1,"181":1}}],["already",{"2":{"30":1,"31":1,"37":1,"67":1,"74":1,"84":1,"96":1,"179":1,"183":4}}],["always",{"2":{"23":1,"24":1,"26":1,"28":1,"52":2,"57":1,"58":2,"63":1,"71":1,"72":1,"76":1,"88":1,"89":2,"130":2,"138":1,"140":1,"141":1,"142":1,"172":1,"179":5,"181":6,"183":4}}],["also",{"2":{"2":1,"6":1,"7":2,"10":1,"12":1,"15":1,"18":2,"21":1,"22":1,"23":1,"24":4,"28":1,"29":2,"30":1,"31":1,"51"
:1,"52":8,"55":1,"58":2,"67":7,"80":1,"86":1,"88":1,"89":1,"93":1,"106":1,"108":2,"130":1,"131":1,"132":1,"179":7,"180":1,"181":42,"183":19}}],["all=false",{"2":{"181":7}}],["all=true`",{"2":{"106":1}}],["all=true",{"2":{"2":1,"61":1,"67":2,"87":4,"88":1,"93":1,"106":1,"108":1,"181":22,"183":2}}],["alltagfilter",{"2":{"181":1,"183":4}}],["allocated",{"2":{"183":1}}],["allocations",{"2":{"183":1}}],["allocation",{"2":{"61":1}}],["allowing",{"2":{"59":1,"179":1,"181":2}}],["allow",{"2":{"49":1,"83":1,"108":1,"181":4,"183":1}}],["allowed",{"2":{"21":1,"52":1,"108":1,"179":2,"181":34}}],["allows",{"2":{"10":1,"21":1,"22":1,"25":1,"42":1,"49":3,"51":1,"52":2,"67":2,"76":1,"106":1,"179":4,"181":9,"183":2}}],["all",{"2":{"0":2,"2":1,"6":3,"7":9,"10":3,"11":2,"12":1,"13":1,"15":1,"19":1,"23":1,"24":1,"26":1,"35":1,"49":2,"52":16,"58":1,"61":1,"63":1,"65":2,"66":1,"67":12,"87":1,"88":1,"89":2,"92":4,"96":1,"99":1,"101":1,"102":1,"105":1,"106":3,"108":1,"114":1,"121":3,"130":2,"131":1,"153":1,"156":2,"170":1,"171":1,"172":1,"174":2,"178":1,"179":30,"181":82,"183":26}}],["along",{"2":{"0":1,"67":1,"140":1,"179":1,"183":2}}],["anonymous",{"2":{"171":1,"172":1}}],["another",{"2":{"11":1,"52":1,"67":3,"179":2,"181":1,"183":5}}],["annotation",{"2":{"183":3}}],["annotations",{"2":{"67":1,"171":1,"172":1,"183":3}}],["annotating",{"2":{"183":1}}],["annotatednode",{"2":{"181":1,"183":11}}],["annotated",{"2":{"67":6,"183":6}}],["annotates",{"2":{"67":1,"183":1}}],["annotater",{"2":{"67":6,"183":10}}],["annotate",{"2":{"10":1,"60":1,"61":1,"67":8,"181":3,"183":16}}],["ancient",{"2":{"58":1,"181":1}}],["ancestors",{"2":{"52":1,"179":5}}],["ancestor",{"2":{"52":1,"179":1}}],["animal",{"2":{"18":2,"88":2,"181":8}}],["ans",{"2":{"10":5,"97":1,"106":5}}],["answer=",{"2":{"183":4}}],["answer=answer",{"2":{"183":1}}],["answering",{"2":{"121":1,"144":1}}],["answered",{"2":{"67":1,"120":1,"156":1,"183":1}}],["answerer",{"2":{"65":1,"67":11,"183":17}}],["answers",{"2":{"3":1,"8":1,"59":1,"60":1,"110":1,"117":1,"118":1,"120":1,"124":2,"125":1,"140":1,"141":3,"142":1,"155":1,"164":2,"166":2,"173":2,"183":5}}],["answer",{"2":{"2":3,"5":1,"6":4,"7":4,"10":4,"11":2,"17":1,"21":3,"23":1,"24":6,"26":1,"31":1,"51":3,"52":16,"54":1,"55":4,"60":3,"61":7,"63":1,"64":2,"66":1,"67":33,"88":1,"105":2,"106":1,"107":4,"110":4,"117":20,"118":22,"120":6,"121":14,"122":7,"124":1,"125":1,"126":1,"137":1,"141":2,"142":3,"160":2,"164":2,"166":2,"168":2,"173":2,"176":2,"179":13,"180":4,"181":9,"183":103}}],["antropic",{"2":{"181":1}}],["antibiotics",{"2":{"120":2}}],["anti",{"2":{"7":1}}],["anthropicstream",{"2":{"181":3}}],["anthropicschema",{"2":{"0":1,"181":4}}],["anthropic",{"2":{"0":1,"69":1,"70":1,"144":1,"145":1,"176":1,"177":1,"178":1,"181":28}}],["analystthemesinresponses",{"0":{"157":1}}],["analystdecisionsintranscript",{"0":{"156":1}}],["analyst",{"2":{"155":1,"156":1}}],["analystchaptersintranscript",{"0":{"155":1}}],["analysis",{"2":{"6":1,"7":1,"17":1,"24":4,"52":1,"61":2,"157":1,"179":1}}],["analyzed",{"2":{"157":1}}],["analyze",{"2":{"7":1,"120":1,"127":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"156":1}}],["anytagfilter",{"2":{"181":1,"183":4}}],["anything",{"2":{"31":1,"34":1,"39":1,"40":1,"42":1,"155":1,"181":3,"183":1}}],["anymore",{"2":{"85":1}}],["anyone",{"2":{"72":1,"95":1}}],["anywhere",{"0":{"82":1},"2":{"22":1,"82":1,"84":1}}],["anyscale",{"2":{"8":1}}],["any",{"2":{"0":2,"2":1,"4":1,"6":2,"7":2,"10":2,"11":3,"13":1,"17":1,"18":4,"21":1,"23":2,"24":2,"25":1,"26":1,"28":1,"31":2,"32":1,"34":1,"35":1,"42":1,"49"
:3,"51":1,"52":13,"58":1,"59":1,"63":2,"66":1,"67":1,"72":1,"73":1,"77":1,"78":1,"79":1,"82":1,"88":3,"89":3,"91":1,"92":2,"93":2,"95":1,"97":1,"102":1,"106":2,"107":1,"108":9,"112":1,"114":2,"115":1,"118":1,"120":1,"126":2,"130":1,"140":1,"141":1,"142":3,"152":2,"155":2,"156":2,"157":1,"159":1,"169":1,"170":2,"171":1,"172":2,"174":1,"177":1,"178":2,"179":10,"181":110,"183":23}}],["an",{"0":{"73":2,"74":1},"2":{"0":2,"2":3,"6":1,"7":3,"10":4,"12":1,"17":1,"19":1,"20":1,"21":1,"22":1,"32":1,"41":1,"42":2,"48":1,"49":3,"51":1,"52":13,"53":1,"54":2,"55":3,"57":1,"58":5,"59":1,"60":2,"61":4,"63":2,"64":1,"66":2,"67":11,"72":2,"73":1,"77":1,"78":1,"79":1,"88":3,"91":2,"92":1,"95":3,"99":2,"100":1,"101":2,"102":1,"105":1,"106":4,"108":8,"110":1,"112":1,"117":2,"118":2,"121":5,"122":1,"124":1,"126":1,"128":1,"130":1,"131":2,"137":1,"138":1,"140":1,"142":1,"150":1,"152":1,"157":1,"167":1,"171":1,"174":2,"179":22,"180":3,"181":144,"183":34}}],["and",{"0":{"20":1,"23":1,"71":1},"2":{"0":6,"1":5,"2":12,"3":2,"4":2,"6":3,"7":17,"8":5,"10":13,"11":3,"12":3,"13":2,"16":2,"17":3,"18":1,"19":4,"21":9,"22":2,"23":6,"24":15,"26":2,"27":3,"28":2,"29":2,"30":3,"31":3,"32":2,"34":1,"35":2,"36":2,"37":4,"41":2,"42":5,"45":1,"47":1,"48":1,"49":11,"51":9,"52":58,"53":1,"54":2,"56":2,"57":6,"58":10,"59":1,"60":1,"61":11,"63":7,"65":3,"66":5,"67":47,"69":1,"70":1,"71":1,"72":2,"73":2,"74":3,"75":2,"76":2,"77":2,"78":2,"79":1,"80":2,"82":1,"83":1,"84":3,"85":2,"86":3,"87":1,"88":13,"89":6,"90":3,"91":1,"92":5,"93":4,"95":4,"96":1,"97":2,"98":2,"99":2,"100":2,"101":1,"102":2,"104":2,"105":5,"106":14,"107":10,"108":27,"110":1,"114":6,"115":1,"117":3,"118":4,"120":5,"121":3,"122":3,"124":2,"125":1,"126":3,"127":1,"128":1,"130":10,"131":4,"132":1,"136":2,"138":2,"140":16,"141":5,"142":10,"144":5,"145":4,"147":4,"149":1,"150":4,"152":1,"153":2,"155":17,"156":16,"157":3,"159":1,"160":2,"161":5,"162":2,"163":9,"164":3,"165":2,"166":2,"167":7,"168":2,"169":4,"170":5,"171":2,"172":3,"173":4,"174":10,"176":2,"177":3,"178":5,"179":98,"180":1,"181":266,"182":2,"183":190}}],["aims",{"2":{"183":4}}],["aimessage>",{"2":{"11":1,"181":1}}],["aimessage",{"2":{"2":1,"10":3,"12":2,"20":2,"22":1,"23":1,"24":1,"26":1,"30":1,"31":1,"34":1,"35":1,"36":1,"39":1,"40":1,"41":1,"42":1,"49":1,"52":6,"61":2,"63":1,"64":2,"87":2,"89":2,"97":2,"98":1,"100":2,"104":1,"106":3,"107":2,"179":11,"181":73}}],["aiprefill",{"2":{"181":5}}],["aiagent",{"2":{"179":1}}],["aihelpme",{"2":{"89":1}}],["air",{"2":{"58":1,"181":1}}],["airetry",{"0":{"21":1},"2":{"10":1,"21":8,"49":3,"51":8,"52":14,"88":6,"106":1,"108":6,"179":15,"181":1}}],["airag",{"2":{"2":2,"6":1,"7":2,"10":1,"60":1,"61":3,"64":1,"65":1,"66":1,"67":7,"181":1,"183":25}}],["aiclassifier",{"2":{"88":1}}],["aiclassify",{"2":{"0":1,"10":2,"13":1,"17":1,"18":2,"52":1,"88":4,"106":1,"136":1,"179":7,"181":21}}],["aicodefixer",{"2":{"49":2,"52":9,"130":1,"131":1,"132":1,"179":31,"181":2,"182":1}}],["aicode",{"2":{"49":3,"52":16,"179":6,"181":13}}],["aicallblock",{"2":{"52":4,"179":13}}],["aicall",{"2":{"10":2,"21":9,"49":3,"51":9,"52":53,"106":2,"108":4,"179":102,"181":2}}],["aitoolrequest",{"2":{"181":4}}],["aitools",{"2":{"0":1,"106":1,"181":16}}],["aitemplatemetadata",{"2":{"13":3,"24":2,"89":2,"181":14}}],["aitemplate",{"2":{"13":3,"24":4,"67":1,"92":2,"105":2,"107":2,"179":4,"181":19,"183":1}}],["aitemplates",{"0":{"24":1},"2":{"10":4,"13":4,"24":3,"89":1,"105":2,"106":3,"181":23,"183":2}}],["aiimage",{"2":{"0":2,"10":2,"106":1,"181":8}}],["aiscan",{"0":{"43":1},"2":{"0":2,"10":2,"20":3,"43":1,"106
":1,"179":6,"181":19}}],["aiextract",{"0":{"108":1},"2":{"0":2,"6":1,"10":2,"19":2,"31":2,"52":1,"67":3,"104":1,"106":2,"108":5,"144":1,"145":1,"147":1,"179":7,"181":44,"183":6}}],["aiembed",{"0":{"44":1},"1":{"45":1,"46":1,"47":1},"2":{"0":1,"10":2,"16":3,"22":3,"23":1,"27":1,"29":1,"30":1,"31":1,"45":2,"46":2,"47":1,"52":1,"67":2,"104":1,"106":2,"179":7,"181":26,"183":3}}],["aigenerate",{"0":{"33":1,"38":1,"81":1,"98":1,"107":1},"1":{"34":1,"35":1,"36":1,"39":1,"40":1,"41":1,"42":1},"2":{"0":1,"10":9,"12":4,"13":3,"14":2,"21":5,"22":2,"23":4,"24":1,"26":2,"27":2,"28":1,"29":2,"30":1,"31":1,"32":1,"34":1,"35":1,"37":1,"39":1,"40":2,"41":1,"42":2,"49":10,"51":5,"52":18,"67":6,"73":1,"74":1,"78":3,"87":3,"88":6,"92":2,"93":3,"98":1,"99":1,"100":1,"106":11,"107":2,"108":11,"179":38,"181":67,"182":1,"183":16}}],["ai",{"0":{"10":1,"30":1,"31":1,"32":1,"37":1,"51":1,"97":1,"106":1},"1":{"33":1,"34":1,"35":1,"36":1,"38":1,"39":1,"40":1,"41":1,"42":1,"43":1,"44":1,"45":1,"46":1,"47":1},"2":{"0":5,"1":1,"10":12,"11":1,"12":1,"14":1,"15":3,"16":1,"17":1,"21":4,"22":1,"23":2,"24":1,"27":2,"29":1,"30":2,"31":3,"32":2,"34":1,"36":1,"37":2,"49":16,"51":1,"52":20,"56":1,"57":2,"58":1,"59":1,"60":1,"61":1,"67":6,"69":2,"73":1,"82":1,"83":1,"84":1,"87":6,"89":2,"91":1,"92":3,"97":5,"98":3,"100":2,"102":1,"103":1,"104":3,"105":3,"106":13,"107":3,"108":8,"110":1,"117":2,"118":4,"120":1,"121":1,"124":1,"137":1,"140":4,"141":5,"142":2,"150":1,"155":1,"156":1,"160":1,"161":1,"162":1,"174":3,"179":51,"181":115,"183":14}}],["cc",{"2":{"183":1}}],["cfg",{"2":{"64":1,"65":1,"67":9,"183":19}}],["cn",{"2":{"58":1,"181":1}}],["cb",{"2":{"52":15,"179":3,"181":36}}],["cpp",{"0":{"28":1},"2":{"25":1,"28":3,"70":1,"100":1,"101":1,"181":1}}],["c",{"2":{"19":1,"28":2,"58":1,"74":2,"76":2,"179":1,"181":1,"183":2}}],["city",{"2":{"19":3}}],["cerebras",{"2":{"181":6}}],["cerebrasopenaischema",{"2":{"181":2}}],["certainly",{"2":{"58":1,"181":1}}],["certain",{"2":{"0":1,"15":1,"88":1,"181":3}}],["ceo",{"2":{"157":1}}],["cents",{"2":{"77":2}}],["cent",{"2":{"75":1,"77":1}}],["celestial",{"2":{"58":2,"181":2}}],["celsius",{"2":{"19":2}}],["cumulative",{"2":{"183":2}}],["curr",{"2":{"183":3}}],["currently",{"2":{"23":1,"26":1,"52":1,"54":1,"64":1,"84":1,"179":2,"181":11}}],["currentweather",{"2":{"19":2}}],["current",{"2":{"19":2,"37":1,"52":1,"75":1,"88":1,"93":1,"96":1,"179":3,"181":10,"183":1}}],["curiosity",{"2":{"181":1}}],["customizing",{"2":{"179":1}}],["customized",{"2":{"86":1,"155":1}}],["customize",{"2":{"52":1,"61":1,"63":4,"67":6,"85":1,"86":1,"179":1,"183":8}}],["customer",{"0":{"78":1},"2":{"78":1}}],["custom",{"0":{"25":1,"42":1,"86":1},"1":{"26":1,"27":1,"28":1,"29":1,"30":1,"31":1},"2":{"52":1,"58":2,"59":1,"63":5,"65":5,"67":13,"86":3,"88":2,"179":1,"181":9,"183":14}}],["customopenaischema",{"2":{"0":3,"23":2,"27":2,"28":1,"181":7}}],["cut",{"2":{"15":1}}],["crucial",{"2":{"181":1}}],["crunchy",{"2":{"31":2,"108":2}}],["craft",{"2":{"163":1,"174":1}}],["critiquing",{"2":{"140":1}}],["critique>",{"2":{"130":1}}],["critiques",{"2":{"130":1,"140":1,"141":1,"142":2}}],["critique",{"2":{"67":1,"130":12,"140":2,"141":2,"183":1}}],["critic",{"0":{"139":1},"1":{"140":1,"141":1,"142":1},"2":{"140":1,"141":4,"142":1}}],["criticism",{"2":{"130":1}}],["criterion",{"2":{"121":1}}],["criteria",{"2":{"121":2,"183":1}}],["credit",{"2":{"108":1,"181":3}}],["credits",{"2":{"70":1,"75":1,"78":1,"95":2}}],["creativity",{"2":{"181":1}}],["creative",{"2":{"10":1,"106":1,"181":2}}],["creation",{"2":{"120":1}}],["creating",{"0":{"72":1}
,"2":{"61":1,"67":1,"163":1,"183":4}}],["creature",{"2":{"18":2,"88":1,"181":5}}],["createqafromcontext",{"2":{"67":1,"183":1}}],["creates",{"2":{"52":1,"66":1,"89":1,"92":1,"179":5,"181":3,"183":1}}],["create",{"0":{"89":1},"2":{"2":2,"10":1,"16":1,"24":2,"49":1,"52":1,"54":1,"55":3,"63":1,"67":5,"72":2,"86":3,"89":6,"91":1,"95":2,"100":1,"105":3,"106":1,"107":1,"108":1,"161":1,"169":1,"177":1,"179":3,"180":4,"181":27,"183":14}}],["cross",{"2":{"7":1}}],["ctx",{"2":{"6":6,"7":2,"183":4}}],["click",{"2":{"72":1,"95":1}}],["clipboard",{"2":{"52":2,"181":2}}],["clearly",{"2":{"63":1,"156":1,"157":1}}],["clear",{"2":{"52":1,"120":1,"121":3,"122":1,"130":1,"150":1,"155":1,"156":2,"157":1,"163":2,"164":1,"166":1,"167":1,"173":1,"179":1}}],["cleaning",{"2":{"162":1}}],["cleanup",{"2":{"24":1}}],["cleaner",{"2":{"24":1,"88":1}}],["cleaned",{"2":{"1":1}}],["clustering",{"2":{"16":1}}],["closely",{"2":{"58":2,"140":1,"153":1,"181":2}}],["close",{"2":{"58":2,"141":1,"163":1,"181":2}}],["closest",{"2":{"2":5,"58":4,"64":1,"66":1,"67":4,"181":9,"183":45}}],["cloudy",{"2":{"181":4}}],["cloud",{"2":{"12":1}}],["claudes",{"2":{"181":1}}],["claudeo",{"2":{"181":2}}],["claudeh",{"2":{"181":23}}],["claude",{"2":{"181":8}}],["clarification",{"2":{"141":1}}],["clarity",{"2":{"6":1,"121":1,"140":4,"141":1,"156":3,"171":1,"172":1}}],["classes=",{"2":{"183":3}}],["classes",{"2":{"183":5}}],["classifies",{"2":{"181":2}}],["classified",{"2":{"174":3,"181":1}}],["classification",{"0":{"17":1,"135":1},"1":{"18":1,"136":1,"137":1,"138":1},"2":{"18":1,"136":2,"137":1,"138":1,"181":4}}],["classify",{"2":{"10":1,"17":1,"106":1,"181":3}}],["class",{"2":{"13":1,"24":4,"104":1,"105":1,"107":2,"108":1,"110":1,"114":1,"117":1,"118":1,"120":1,"124":1,"125":1,"127":1,"136":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"150":1,"153":1,"157":1,"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"171":1,"172":1,"173":1,"176":1,"177":1,"178":1,"181":2,"183":1}}],["child2",{"2":{"179":2}}],["child11",{"2":{"179":2}}],["child1",{"2":{"179":3}}],["children",{"2":{"67":2,"179":2,"183":9}}],["chief",{"2":{"140":4}}],["chiefeditortranscriptcritic",{"0":{"140":1}}],["chuckles",{"2":{"41":2}}],["chunkdata",{"2":{"181":3,"183":13}}],["chunkkeywordsindex",{"2":{"67":4,"181":2,"183":13}}],["chunked",{"2":{"67":1,"183":8}}],["chunkembeddingsindex",{"2":{"67":3,"181":1,"183":7}}],["chunkers",{"2":{"183":1}}],["chunker=filechunker",{"2":{"67":1,"183":1}}],["chunker",{"2":{"61":3,"63":2,"67":14,"183":29}}],["chunking",{"2":{"63":1,"66":1,"67":1,"183":2}}],["chunkindex",{"2":{"60":1,"183":3}}],["chunk",{"2":{"2":3,"8":1,"10":1,"58":8,"61":2,"66":3,"67":6,"90":1,"114":1,"120":4,"181":31,"183":25}}],["chunks`",{"2":{"181":3}}],["chunks",{"2":{"2":13,"3":1,"4":2,"8":1,"58":13,"60":2,"61":2,"63":3,"64":3,"66":5,"67":35,"90":1,"112":1,"181":31,"183":146}}],["cheaper",{"2":{"21":1,"51":1,"52":1,"179":1,"181":2}}],["cheap",{"2":{"18":1,"75":1,"181":1}}],["checkout",{"2":{"58":1,"181":1}}],["check",{"2":{"21":1,"22":1,"51":1,"52":7,"67":1,"71":1,"73":1,"75":1,"79":1,"80":1,"84":1,"86":1,"88":3,"91":1,"95":1,"108":2,"142":1,"169":1,"171":1,"172":1,"177":1,"179":5,"181":14,"183":4}}],["checks",{"2":{"17":1,"51":1,"52":4,"77":1,"179":6,"181":2}}],["checking",{"2":{"17":1}}],["choice=",{"2":{"181":1}}],["choice",{"2":{"108":1,"138":3,"179":1,"181":19}}],["choices",{"2":{"10":3,"18":2,"88":2,"106":3,"107":1,"136":6,"138":6,"181":42}}],["chosen",{"2":{"21":1,"51":1,"52":1,"179":1}}],["choose",{"2":{"0":1,"157":1,"181":1,"
183":2}}],["chapter",{"2":{"155":2}}],["chapters",{"2":{"155":4,"156":1}}],["chain",{"2":{"144":1,"169":1,"171":1,"177":1,"181":2}}],["chars",{"2":{"58":1,"181":1}}],["character",{"2":{"58":3,"153":1,"181":3,"183":1}}],["characters",{"2":{"36":1,"58":4,"130":1,"179":4,"181":5,"183":13}}],["charles",{"2":{"55":2,"180":2}}],["charge",{"2":{"36":2,"75":2,"181":1}}],["chance",{"2":{"183":1}}],["chances",{"2":{"21":1,"51":1,"52":1,"179":1}}],["channel",{"2":{"57":1,"60":1,"181":3}}],["changing",{"0":{"85":1},"2":{"52":1,"61":1,"63":1,"67":1,"181":1,"183":3}}],["changed",{"2":{"130":1}}],["changes",{"0":{"42":1},"2":{"37":1,"89":1,"118":1,"142":1,"179":1,"181":3,"183":4}}],["change",{"2":{"1":1,"21":1,"24":1,"42":1,"51":1,"52":1,"67":3,"86":1,"108":1,"130":1,"179":3,"181":9,"182":1,"183":8}}],["challenging",{"2":{"0":1}}],["chat1",{"2":{"181":1}}],["chatmlschema",{"2":{"181":5}}],["chatgpt",{"2":{"11":1,"75":1,"183":3}}],["chatbots",{"2":{"181":1}}],["chatbot",{"2":{"1":1,"2":1}}],["chat",{"2":{"0":1,"29":3,"39":1,"42":1,"67":8,"85":2,"86":2,"100":1,"107":2,"181":34,"183":25}}],["cababcab",{"2":{"181":1}}],["caching",{"2":{"181":11}}],["caches",{"2":{"181":6}}],["cache",{"2":{"73":1,"181":20}}],["caused",{"2":{"130":1}}],["causes",{"2":{"41":1}}],["caught",{"2":{"52":1,"179":1,"181":1}}],["capable",{"2":{"138":1,"181":1}}],["capabilities",{"2":{"52":1,"69":1,"181":1}}],["captioning",{"2":{"181":2}}],["captain",{"2":{"89":2,"181":2}}],["capturing",{"2":{"52":3,"153":1,"181":3}}],["captures",{"2":{"93":1,"152":1,"181":1}}],["captured",{"2":{"52":3,"181":3}}],["capture",{"2":{"52":5,"93":2,"108":1,"179":1,"181":11}}],["capital",{"2":{"15":1,"67":2,"97":5,"98":2,"107":4,"183":5}}],["casual",{"2":{"163":1}}],["cased",{"2":{"161":1}}],["cases",{"2":{"52":1,"170":1,"178":1,"179":1,"181":7}}],["case",{"2":{"24":1,"61":1,"69":1,"88":2,"91":1,"107":1,"114":1,"163":1,"181":3,"183":2}}],["castle",{"2":{"18":1,"181":1}}],["carries",{"2":{"171":1,"172":1}}],["carrying",{"2":{"58":1,"181":1}}],["cartoonish",{"2":{"152":2}}],["cartesian",{"2":{"7":1}}],["carefully",{"2":{"114":1,"122":1,"142":1,"157":1}}],["care",{"2":{"102":1}}],["car",{"2":{"88":2,"108":1,"181":7}}],["carve",{"2":{"59":1}}],["carlo",{"2":{"21":1,"49":1,"52":1,"179":3}}],["ca",{"2":{"19":1}}],["cat",{"2":{"181":7}}],["categorize",{"2":{"157":1}}],["categories",{"0":{"18":1},"2":{"18":1,"88":2,"114":1,"115":1,"138":2,"181":2}}],["category",{"2":{"88":2,"114":1,"115":1,"138":4,"181":1,"183":1}}],["catch",{"2":{"0":1,"19":1,"21":2,"49":1,"51":2,"52":4,"108":1,"179":9,"181":1}}],["camelcase",{"2":{"21":1,"181":1}}],["came",{"2":{"10":1,"183":1}}],["calculating",{"2":{"181":1}}],["calculation",{"2":{"181":1,"183":1}}],["calculates",{"2":{"183":3}}],["calculated",{"2":{"74":1,"174":1,"181":2}}],["calculate",{"2":{"7":1,"16":2,"181":10}}],["calltracer",{"2":{"181":2}}],["callable",{"2":{"181":9}}],["callbacks",{"2":{"181":1}}],["callback",{"2":{"181":34}}],["calling",{"2":{"24":1,"31":1,"37":1,"49":1,"52":1,"108":2,"144":1,"145":1,"147":1,"179":2,"181":12,"183":1}}],["call",{"2":{"10":2,"11":1,"15":1,"21":2,"24":1,"29":1,"49":3,"51":2,"52":15,"66":1,"67":10,"73":1,"77":1,"87":1,"89":2,"100":1,"105":1,"106":2,"108":2,"144":1,"145":1,"147":1,"179":44,"181":85,"183":19}}],["called",{"2":{"7":3,"10":1,"11":1,"21":1,"24":1,"49":1,"52":1,"66":4,"89":1,"97":1,"100":1,"106":1,"179":4,"181":3,"183":4}}],["calls",{"0":{"51":1},"2":{"0":2,"10":1,"21":4,"46":1,"49":2,"51":1,"52":6,"55":1,"65":1,"67":10,"71":1,"88":1,"106":2,"108":2,"144":1,"145":1,"147":1,"179":1
3,"180":1,"181":41,"182":1,"183":22}}],["cannot",{"0":{"70":1,"73":2},"2":{"17":1,"58":1,"67":1,"69":1,"78":1,"108":2,"137":1,"181":6,"183":4}}],["candidatechunks",{"2":{"61":4,"66":1,"67":1,"181":1,"183":13}}],["candidate",{"2":{"2":1,"183":17}}],["candidates",{"2":{"2":1,"61":4,"63":1,"66":3,"67":2,"183":40}}],["can",{"0":{"92":1},"2":{"2":1,"6":2,"7":7,"8":1,"10":4,"11":1,"12":4,"13":2,"14":2,"15":5,"16":2,"17":2,"18":2,"19":2,"20":4,"21":8,"22":3,"23":9,"24":11,"26":7,"27":1,"28":2,"29":3,"30":5,"31":4,"32":1,"33":1,"35":2,"39":1,"40":1,"42":2,"43":1,"46":2,"49":2,"51":7,"52":22,"55":2,"57":1,"58":4,"61":4,"63":1,"65":2,"66":2,"67":10,"69":1,"71":2,"72":1,"73":3,"74":2,"76":1,"77":2,"78":2,"79":1,"80":1,"82":2,"84":2,"85":3,"86":3,"87":3,"88":6,"89":8,"91":2,"92":2,"93":8,"95":1,"96":1,"97":1,"98":2,"99":1,"100":2,"101":2,"103":2,"105":3,"106":4,"107":1,"108":10,"112":1,"114":1,"120":1,"130":1,"131":1,"132":1,"155":1,"170":1,"178":1,"179":23,"180":2,"181":162,"183":27}}],["copies",{"2":{"183":1}}],["copy",{"2":{"2":1,"22":1,"45":2,"52":1,"181":3}}],["cognitive",{"2":{"181":2}}],["coding",{"2":{"179":1}}],["codeunits",{"2":{"181":3}}],["code>",{"2":{"130":1,"178":6}}],["codefixer",{"2":{"179":4}}],["codefixertiny",{"0":{"132":1}}],["codefixershort",{"0":{"131":1}}],["codefixerrci",{"0":{"130":1},"2":{"179":1}}],["codefailedtimeout",{"2":{"52":1,"179":1}}],["codefailedeval",{"2":{"52":1,"179":1}}],["codefailedparse",{"2":{"52":1,"179":1}}],["codellama",{"2":{"84":1}}],["codes",{"2":{"75":1}}],["codesuccess",{"2":{"52":1,"179":1}}],["codeempty",{"2":{"52":1,"179":1}}],["code",{"0":{"129":1},"1":{"130":1,"131":1,"132":1},"2":{"20":3,"23":1,"24":2,"26":1,"48":1,"49":1,"52":49,"57":2,"59":1,"67":4,"88":1,"104":1,"114":1,"130":13,"131":9,"132":3,"142":11,"167":1,"169":3,"170":12,"177":3,"178":10,"179":36,"181":94,"182":1,"183":14}}],["coalitional",{"2":{"174":1}}],["cot",{"2":{"169":1,"171":1,"177":1}}],["core",{"2":{"127":1,"179":1}}],["corpus",{"2":{"60":1}}],["corresponds",{"2":{"181":2}}],["correspondence",{"2":{"163":1}}],["correspond",{"2":{"61":1,"67":1,"179":1,"183":4}}],["corresponding",{"2":{"0":4,"12":1,"30":1,"31":1,"49":2,"63":2,"65":1,"66":2,"67":4,"70":1,"108":2,"142":1,"155":1,"156":1,"157":1,"174":1,"181":25,"183":9}}],["correctiverag",{"2":{"183":1}}],["correcting",{"2":{"142":1}}],["corrections",{"2":{"140":1}}],["correct",{"2":{"52":4,"88":1,"108":3,"130":1,"142":1,"169":1,"170":4,"171":1,"172":1,"177":1,"178":4,"179":4,"181":1,"183":1}}],["correctly",{"2":{"0":1,"174":3,"181":7}}],["covering",{"2":{"170":1,"178":1}}],["cover",{"2":{"121":1}}],["coversation",{"2":{"52":1,"179":1}}],["coverage",{"0":{"0":1}}],["collects",{"2":{"179":1}}],["collect",{"2":{"179":2,"181":2}}],["collection",{"2":{"24":1,"67":1,"183":1}}],["collaboration",{"2":{"163":1}}],["colorful",{"2":{"152":1,"181":1}}],["colors",{"2":{"67":1,"183":1}}],["color",{"2":{"21":2,"51":2,"52":2,"58":1,"61":3,"179":2,"181":7,"183":8}}],["column",{"2":{"7":2,"181":2,"183":4}}],["columns",{"2":{"6":1,"7":4,"183":1}}],["cosmic",{"2":{"58":1,"181":1}}],["cosinesimilarity",{"2":{"67":2,"181":1,"183":12}}],["cosine",{"2":{"16":2,"47":2,"181":4,"183":5}}],["cost2",{"2":{"181":2}}],["cost1",{"2":{"181":3}}],["costing",{"2":{"181":2}}],["costs",{"2":{"10":1,"67":1,"76":1,"106":1,"181":9,"183":1}}],["cost",{"0":{"77":1},"2":{"4":1,"11":1,"13":1,"20":2,"23":1,"24":1,"26":1,"30":1,"31":1,"61":1,"67":16,"69":1,"77":1,"87":1,"97":2,"98":1,"181":76,"183":50}}],["counds",{"2":{"179":1}}],["counts",{"2":{"183":1}}],["counted",{"2":
{"179":1}}],["counter",{"2":{"67":3,"179":2,"183":7}}],["counterpart",{"2":{"49":1,"58":1,"181":1}}],["counterparts",{"2":{"10":1,"21":1,"49":1,"106":1}}],["counting",{"2":{"161":1}}],["country=",{"2":{"98":1}}],["country",{"2":{"97":1,"98":1,"183":4}}],["count",{"2":{"28":1,"37":1,"67":1,"110":1,"112":1,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"130":1,"131":1,"132":1,"134":1,"136":1,"137":1,"138":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"149":1,"150":1,"152":1,"153":1,"155":1,"156":1,"157":1,"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"171":1,"172":1,"173":1,"174":1,"176":1,"177":1,"178":1,"181":14,"183":1}}],["couldn",{"2":{"181":1}}],["could",{"2":{"7":1,"8":1,"21":2,"34":1,"51":2,"52":2,"58":2,"67":2,"74":1,"92":1,"108":1,"126":1,"150":1,"179":2,"181":7,"183":2}}],["coherence",{"2":{"181":1}}],["coherereranker",{"2":{"67":1,"181":1,"183":5}}],["cohere",{"2":{"2":1,"8":1,"67":2,"181":6,"183":19}}],["conv",{"2":{"92":1,"93":2,"108":2,"181":36}}],["conventions",{"2":{"181":1}}],["convention",{"2":{"181":1}}],["convenient",{"2":{"181":2}}],["convenience",{"2":{"2":1,"58":2,"181":5,"183":1}}],["convey",{"2":{"100":1,"103":1}}],["conversion",{"2":{"108":1}}],["conversational",{"2":{"181":2}}],["conversation=myconversation",{"2":{"179":1}}],["conversationlabeler",{"0":{"161":1}}],["conversation2",{"2":{"91":1}}],["conversation1",{"2":{"91":1}}],["conversations",{"0":{"12":1,"87":1},"2":{"12":1,"35":1,"42":2,"52":1,"61":2,"67":2,"91":3,"93":2,"98":1,"179":1,"181":26,"183":9}}],["conversation",{"2":{"10":2,"12":6,"21":1,"30":1,"35":1,"41":1,"51":1,"52":25,"67":1,"87":11,"88":4,"89":1,"91":3,"93":2,"97":1,"103":2,"106":2,"108":3,"140":2,"141":5,"142":1,"161":6,"179":71,"181":240,"183":5}}],["converts",{"2":{"181":1}}],["converted",{"2":{"181":2}}],["convert",{"2":{"107":2,"108":1,"181":13,"183":4}}],["converting",{"2":{"78":1,"126":1,"128":1}}],["confusion",{"2":{"41":1}}],["confirm",{"2":{"181":2}}],["confident",{"2":{"24":3,"105":1,"107":2,"160":1,"164":1,"166":1,"168":1,"173":1,"176":1}}],["confidence",{"2":{"10":1,"58":1,"179":2,"181":1}}],["config=retryconfig",{"2":{"88":1,"108":1}}],["configures",{"2":{"181":1}}],["configure",{"2":{"181":9}}],["configuring",{"0":{"79":1}}],["configuration",{"2":{"67":1,"79":2,"179":1,"181":1,"183":3}}],["configurable",{"2":{"61":1}}],["config",{"2":{"21":2,"49":1,"51":2,"52":9,"67":1,"82":1,"108":2,"179":14,"181":1,"183":3}}],["connection",{"2":{"35":1}}],["conducted",{"2":{"179":1}}],["cond",{"2":{"21":1,"51":1,"52":6,"179":15}}],["condition=>string",{"2":{"181":1}}],["conditions",{"2":{"181":1}}],["condition",{"2":{"21":4,"51":5,"52":16,"88":3,"108":3,"179":31,"181":19}}],["concatenation",{"2":{"183":1}}],["concatenates",{"2":{"181":1}}],["concatenate",{"2":{"36":1,"52":1,"181":1,"183":1}}],["concentrate",{"2":{"174":1}}],["concepts",{"0":{"100":1},"1":{"101":1,"102":1,"103":1,"104":1,"105":1,"106":1},"2":{"99":1,"100":1,"126":1}}],["concept",{"2":{"16":1,"126":1}}],["conclusion",{"2":{"167":1}}],["conclusions",{"2":{"155":1}}],["conclude",{"2":{"140":1,"142":1,"174":1}}],["concrete",{"2":{"108":1}}],["concise",{"2":{"24":3,"104":1,"105":1,"107":2,"110":1,"117":1,"118":1,"120":1,"127":1,"130":1,"155":3,"156":2,"159":1,"160":1,"161":1,"162":1,"163":2,"164":1,"166":1,"167":1,"168":1,"169":1,"173":1,"176":1,"177":1,"181":6}}],["concurrent",{"2":{"14":1,"74":2}}],["concurrently",{"2":{"14":1,"98":1}}],["contrast",{"2":{"181":1}}],["control",{"2":{"78":1
,"88":1,"97":1,"171":1,"172":1,"183":2}}],["controlling",{"2":{"52":1,"179":1}}],["controlled",{"2":{"21":1}}],["contribute",{"2":{"61":1}}],["contribution",{"2":{"49":1,"174":1}}],["continuous",{"2":{"71":1}}],["continued",{"2":{"181":1}}],["continues>",{"2":{"181":5}}],["continue",{"2":{"52":1,"97":1,"179":1,"181":6}}],["continue>",{"2":{"20":1,"181":2}}],["continuing",{"2":{"11":1,"77":1}}],["contained",{"2":{"183":2}}],["container",{"2":{"181":1}}],["containing",{"2":{"10":3,"67":4,"106":3,"179":1,"181":7,"183":13}}],["contain",{"2":{"7":1,"10":1,"106":1,"181":5,"183":2}}],["contains",{"2":{"6":1,"7":8,"24":1,"32":1,"37":1,"57":1,"61":1,"63":1,"104":1,"179":1,"181":3,"182":1,"183":2}}],["contemporary",{"2":{"15":1,"181":1}}],["contents",{"2":{"183":2}}],["content=",{"2":{"181":8}}],["content",{"2":{"6":1,"10":11,"16":3,"19":2,"20":2,"21":1,"22":2,"23":1,"27":1,"31":1,"45":1,"46":1,"47":2,"51":1,"52":5,"55":2,"67":1,"88":3,"92":2,"97":2,"106":11,"107":5,"108":3,"140":3,"155":1,"156":2,"179":10,"180":2,"181":132,"183":6}}],["context=true",{"2":{"183":1}}],["context=",{"2":{"183":4}}],["contexts",{"2":{"179":1}}],["contextual",{"2":{"67":1,"120":1,"183":2}}],["contextenumerator",{"2":{"67":3,"181":1,"183":11}}],["contexter",{"2":{"67":8,"183":14}}],["context",{"2":{"2":2,"5":1,"6":3,"8":3,"10":2,"11":2,"28":1,"52":1,"54":1,"57":1,"58":9,"61":6,"63":1,"64":1,"66":5,"67":32,"103":2,"110":7,"117":11,"118":1,"120":11,"121":8,"122":6,"126":1,"140":1,"150":1,"156":1,"157":1,"174":1,"179":4,"181":17,"183":103}}],["consecutive",{"2":{"183":2}}],["conservative",{"2":{"36":1,"181":1}}],["consumed",{"2":{"181":2}}],["consumer",{"2":{"71":1}}],["consuming",{"2":{"3":1}}],["considering",{"2":{"61":1,"121":1,"181":2,"183":1}}],["considered",{"2":{"58":1,"67":1,"181":1,"183":2}}],["consider",{"2":{"24":1,"67":2,"126":1,"140":1,"174":1,"181":1,"183":2}}],["consistent",{"2":{"121":2,"122":1,"161":1,"183":2}}],["consistency",{"2":{"6":1,"36":1,"121":1,"140":1,"181":3,"183":1}}],["consisting",{"2":{"20":1,"181":2}}],["consists",{"2":{"7":1}}],["constant",{"2":{"181":9}}],["constituent",{"2":{"141":1}}],["constrained",{"2":{"181":2}}],["constraints",{"2":{"58":1,"141":1,"181":1}}],["construct",{"2":{"181":1}}],["constructor",{"2":{"181":1}}],["constructive",{"2":{"140":1}}],["constructs",{"2":{"66":1}}],["const",{"2":{"1":2,"24":1,"25":1,"32":1,"37":1,"48":1,"59":1,"82":1,"107":1,"108":1,"181":3}}],["combination",{"2":{"181":1,"183":2}}],["combining",{"2":{"59":1}}],["combines",{"2":{"67":2,"183":2}}],["combined",{"2":{"7":1,"183":1}}],["combine",{"2":{"5":1,"6":2,"7":5}}],["com",{"2":{"20":1,"54":2,"58":1,"75":1,"112":1,"180":1,"181":16,"183":5}}],["comes",{"2":{"91":1,"183":1}}],["come",{"2":{"13":1,"181":3}}],["commas",{"2":{"183":1}}],["commands",{"2":{"58":4,"96":1,"181":4}}],["command",{"2":{"24":1,"28":1,"58":1,"103":1,"181":1}}],["commit",{"2":{"79":1,"95":1}}],["comments",{"2":{"77":1,"130":1,"131":1,"181":1}}],["comment",{"2":{"77":1,"117":1,"118":1,"170":1,"178":1}}],["commercial",{"2":{"77":1}}],["communicates",{"2":{"140":1}}],["communications",{"2":{"163":2}}],["communication",{"2":{"24":3,"100":1,"102":1,"104":1,"105":1,"107":2,"160":1,"162":1,"163":3,"164":1,"166":1,"168":1,"169":1,"173":1,"176":1,"177":1,"179":1,"181":1}}],["community",{"2":{"24":1,"61":1}}],["commun",{"2":{"13":1,"24":1,"181":2}}],["commonly",{"2":{"181":1}}],["common",{"2":{"1":1,"7":2,"57":7,"58":17,"88":1,"91":1,"179":1,"181":20}}],["compelling",{"2":{"167":1,"174":1}}],["computational",{"2":{"61":1}}],["computing",{"2":{
"61":8,"67":1,"183":6}}],["computes",{"2":{"183":1}}],["compute",{"2":{"58":1,"181":1,"183":1}}],["computer",{"2":{"22":1,"23":1}}],["comprehensively",{"2":{"120":1,"170":1,"178":1}}],["comprehensive",{"2":{"61":2,"150":1,"155":1}}],["comprehension",{"0":{"20":1}}],["complicated",{"2":{"88":2,"108":1}}],["complicity",{"2":{"58":1,"181":1}}],["complement",{"2":{"182":1}}],["completions",{"2":{"181":5}}],["completions`",{"2":{"181":1}}],["completion",{"2":{"181":7}}],["completeling",{"2":{"181":1}}],["completely",{"2":{"57":1,"67":1,"183":2}}],["completeness",{"2":{"6":1,"121":1,"141":1}}],["complete",{"2":{"5":2,"6":1,"7":2,"121":1}}],["complex",{"2":{"12":1,"16":1,"17":1,"19":1,"21":1,"61":1,"98":1,"179":2,"181":6}}],["compact",{"2":{"170":1,"178":1,"183":1}}],["compass",{"2":{"89":2,"181":2}}],["comparable",{"2":{"78":1}}],["comparison",{"2":{"61":2}}],["comparing",{"2":{"58":1,"181":1}}],["compared",{"2":{"183":1}}],["compare",{"2":{"57":1,"58":2,"179":1,"181":2}}],["company",{"2":{"58":2,"86":1,"181":2}}],["companion",{"2":{"57":1}}],["compatibility",{"2":{"42":1,"181":6}}],["compatible",{"0":{"23":1,"27":1},"2":{"0":2,"23":2,"25":1,"27":1,"181":2}}],["composite",{"2":{"183":2}}],["composes",{"2":{"181":1}}],["compose",{"2":{"93":1,"120":1}}],["composed",{"2":{"7":1,"181":3}}],["components",{"2":{"65":2,"67":1,"183":3}}],["component",{"2":{"52":1,"65":1,"67":1,"179":2,"183":2}}],["compiled",{"2":{"67":1,"73":3,"183":1}}],["compile",{"2":{"24":1,"28":1,"73":1}}]],"serializationVersion":2}'; +export { + _localSearchIndexroot as default +}; diff --git a/previews/PR218/assets/chunks/@localSearchIndexroot.Dn3ujldP.js b/previews/PR218/assets/chunks/@localSearchIndexroot.Dn3ujldP.js deleted file mode 100644 index 2df95bc89..000000000 --- a/previews/PR218/assets/chunks/@localSearchIndexroot.Dn3ujldP.js +++ /dev/null @@ -1,4 +0,0 @@ -const _localSearchIndexroot = 
'{"documentCount":184,"nextId":184,"documentIds":{"0":"/PromptingTools.jl/previews/PR218/coverage_of_model_providers#Coverage-of-Model-Providers","1":"/PromptingTools.jl/previews/PR218/examples/building_RAG#Building-a-Simple-Retrieval-Augmented-Generation-(RAG)-System-with-RAGTools","2":"/PromptingTools.jl/previews/PR218/examples/building_RAG#RAG-in-Two-Lines","3":"/PromptingTools.jl/previews/PR218/examples/building_RAG#Evaluations","4":"/PromptingTools.jl/previews/PR218/examples/building_RAG#Generate-Q-and-A-pairs","5":"/PromptingTools.jl/previews/PR218/examples/building_RAG#Explore-one-Q-and-A-pair","6":"/PromptingTools.jl/previews/PR218/examples/building_RAG#Evaluate-this-Q-and-A-pair","7":"/PromptingTools.jl/previews/PR218/examples/building_RAG#Evaluate-the-Whole-Set","8":"/PromptingTools.jl/previews/PR218/examples/building_RAG#What-would-we-do-next?","9":"/PromptingTools.jl/previews/PR218/examples/working_with_aitemplates#Using-AITemplates","10":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Various-Examples","11":"/PromptingTools.jl/previews/PR218/examples/readme_examples#ai*-Functions-Overview","12":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Seamless-Integration-Into-Your-Workflow","13":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Advanced-Prompts-/-Conversations","14":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Templated-Prompts","15":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Asynchronous-Execution","16":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Model-Aliases","17":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Embeddings","18":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Classification","19":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Routing-to-Defined-Categories","20":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Data-Extraction","21":"/PromptingTools.jl/previews/PR218/examples/readme_examples#OCR-and-Image-Comprehension","22":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Experimental-Agent-Workflows-/-Output-Validation-with-airetry!","23":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Using-Ollama-models","24":"/PromptingTools.jl/previews/PR218/examples/readme_examples#Using-MistralAI-API-and-other-OpenAI-compatible-APIs","25":"/PromptingTools.jl/previews/PR218/examples/working_with_custom_apis#Custom-APIs","26":"/PromptingTools.jl/previews/PR218/examples/working_with_custom_apis#Using-MistralAI","27":"/PromptingTools.jl/previews/PR218/examples/working_with_custom_apis#Using-other-OpenAI-compatible-APIs","28":"/PromptingTools.jl/previews/PR218/examples/working_with_custom_apis#Using-llama.cpp-server","29":"/PromptingTools.jl/previews/PR218/examples/working_with_custom_apis#Using-Databricks-Foundation-Models","30":"/PromptingTools.jl/previews/PR218/examples/working_with_custom_apis#Using-Together.ai","31":"/PromptingTools.jl/previews/PR218/examples/working_with_custom_apis#Using-Fireworks.ai","32":"/PromptingTools.jl/previews/PR218/examples/working_with_google_ai_studio#Working-with-Google-AI-Studio","33":"/PromptingTools.jl/previews/PR218/examples/working_with_google_ai_studio#Text-Generation-with-aigenerate","34":"/PromptingTools.jl/previews/PR218/examples/working_with_google_ai_studio#Simple-message","35":"/PromptingTools.jl/previews/PR218/examples/working_with_google_ai_studio#Advanced-Prompts","36":"/PromptingTools.jl/previews/PR218/examples/working_with_google_ai_studio#Gotchas","37":"/Prompting
Tools.jl/previews/PR218/examples/working_with_ollama#Local-models-with-Ollama.ai","38":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Text-Generation-with-aigenerate","39":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Simple-message","40":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Standard-string-interpolation","41":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Advanced-Prompts","42":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Schema-Changes-/-Custom-models","43":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Providing-Images-with-aiscan","44":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Embeddings-with-aiembed","45":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Simple-embedding-for-one-document","46":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Multiple-documents-embedding","47":"/PromptingTools.jl/previews/PR218/examples/working_with_ollama#Using-postprocessing-function","48":"/PromptingTools.jl/previews/PR218/extra_tools/api_tools_intro#APITools-Introduction","49":"/PromptingTools.jl/previews/PR218/extra_tools/api_tools_intro#Highlights","50":"/PromptingTools.jl/previews/PR218/extra_tools/api_tools_intro#References","51":"/PromptingTools.jl/previews/PR218/extra_tools/agent_tools_intro#Agent-Tools-Introduction","52":"/PromptingTools.jl/previews/PR218/extra_tools/agent_tools_intro#Highlights","53":"/PromptingTools.jl/previews/PR218/extra_tools/agent_tools_intro#Examples","54":"/PromptingTools.jl/previews/PR218/extra_tools/agent_tools_intro#Automatic-Fixing-of-AI-Calls","55":"/PromptingTools.jl/previews/PR218/extra_tools/agent_tools_intro#References","56":"/PromptingTools.jl/previews/PR218/extra_tools/rag_tools_intro#RAG-Tools-Introduction","57":"/PromptingTools.jl/previews/PR218/extra_tools/rag_tools_intro#Highlights","58":"/PromptingTools.jl/previews/PR218/extra_tools/rag_tools_intro#Examples","59":"/PromptingTools.jl/previews/PR218/extra_tools/rag_tools_intro#RAG-Interface","60":"/PromptingTools.jl/previews/PR218/extra_tools/rag_tools_intro#System-Overview","61":"/PromptingTools.jl/previews/PR218/extra_tools/rag_tools_intro#RAG-Diagram","62":"/PromptingTools.jl/previews/PR218/extra_tools/rag_tools_intro#Passing-Keyword-Arguments","63":"/PromptingTools.jl/previews/PR218/extra_tools/rag_tools_intro#Deepdive","64":"/PromptingTools.jl/previews/PR218/extra_tools/rag_tools_intro#References","65":"/PromptingTools.jl/previews/PR218/extra_tools/text_utilities_intro#Text-Utilities","66":"/PromptingTools.jl/previews/PR218/extra_tools/text_utilities_intro#Highlights","67":"/PromptingTools.jl/previews/PR218/extra_tools/text_utilities_intro#References","68":"/PromptingTools.jl/previews/PR218/getting_started#Getting-Started","69":"/PromptingTools.jl/previews/PR218/getting_started#Prerequisites","70":"/PromptingTools.jl/previews/PR218/getting_started#Installation","71":"/PromptingTools.jl/previews/PR218/getting_started#Quick-Start-with-@ai_str","72":"/PromptingTools.jl/previews/PR218/getting_started#Using-aigenerate-with-placeholders","73":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Frequently-Asked-Questions","74":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Why-OpenAI","75":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#What-if-I-cannot-access-OpenAI?","76":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Data-Privacy-and-OpenAI","77":"/PromptingTools.jl/previews/PR218/freque
ntly_asked_questions#Creating-OpenAI-API-Key","78":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#getting-an-error-argumenterror-api-key-cannot-be-empty-despite-having-set-openai-api-key-getting-an-error-argumenterror-apikey-cannot-be-empty-despite-having-set-openaiapi-key","79":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Getting-an-error-"Rate-limit-exceeded"-from-OpenAI?","80":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Getting-the-error-"429-Too-Many-Requests"?","81":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Setting-OpenAI-Spending-Limits","82":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#How-much-does-it-cost?-Is-it-worth-paying-for?","83":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#How-to-try-new-OpenAI-models-if-I\'m-not-Tier-5-customer?","84":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Configuring-the-Environment-Variable-for-API-Key","85":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Setting-the-API-Key-via-Preferences.jl","86":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Understanding-the-API-Keyword-Arguments-in-aigenerate-(api_kwargs)","87":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Instant-Access-from-Anywhere","88":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Open-Source-Alternatives","89":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Setup-Guide-for-Ollama","90":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Changing-the-Default-Model-or-Schema","91":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Using-Custom-API-Providers-like-Azure-or-Databricks","92":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#How-to-have-Multi-turn-Conversations?","93":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#How-to-have-typed-responses?","94":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#How-to-quickly-create-a-prompt-template?","95":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Do-we-have-a-RecursiveCharacterTextSplitter-like-Langchain?","96":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#How-would-I-fine-tune-a-model?","97":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Can-I-see-how-my-prompt-is-rendered-/-what-is-sent-to-the-API?","98":"/PromptingTools.jl/previews/PR218/frequently_asked_questions#Automatic-Logging-/-Tracing","99":"/PromptingTools.jl/previews/PR218/how_it_works#How-It-Works","100":"/PromptingTools.jl/previews/PR218/how_it_works#Key-Concepts","101":"/PromptingTools.jl/previews/PR218/how_it_works#API/Model-Providers","102":"/PromptingTools.jl/previews/PR218/how_it_works#Schemas","103":"/PromptingTools.jl/previews/PR218/how_it_works#Prompts","104":"/PromptingTools.jl/previews/PR218/how_it_works#Messages","105":"/PromptingTools.jl/previews/PR218/how_it_works#Prompt-Templates","106":"/PromptingTools.jl/previews/PR218/how_it_works#ai*-Functions-Overview","107":"/PromptingTools.jl/previews/PR218/how_it_works#Walkthrough-Example-for-aigenerate","108":"/PromptingTools.jl/previews/PR218/how_it_works#Walkthrough-Example-for-aiextract","109":"/PromptingTools.jl/previews/PR218/prompts/RAG#Basic-Rag-Templates","110":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGAnswerFromContext","111":"/PromptingTools.jl/previews/PR218/prompts/RAG#Ranking-Templates","112":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGRankGPT","113":"/PromptingTools.jl/pr
eviews/PR218/prompts/RAG#Metadata-Templates","114":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGExtractMetadataLong","115":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGExtractMetadataShort","116":"/PromptingTools.jl/previews/PR218/prompts/RAG#Refinement-Templates","117":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGAnswerRefiner","118":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGWebSearchRefiner","119":"/PromptingTools.jl/previews/PR218/prompts/RAG#Evaluation-Templates","120":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGCreateQAFromContext","121":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGJudgeAnswerFromContext","122":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGJudgeAnswerFromContextShort","123":"/PromptingTools.jl/previews/PR218/prompts/RAG#Query-Transformations-Templates","124":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGJuliaQueryHyDE","125":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGQueryHyDE","126":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGQueryKeywordExpander","127":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGQueryOptimizer","128":"/PromptingTools.jl/previews/PR218/prompts/RAG#Template:-RAGQuerySimplifier","129":"/PromptingTools.jl/previews/PR218/prompts/agents#Code-Fixing-Templates","130":"/PromptingTools.jl/previews/PR218/prompts/agents#Template:-CodeFixerRCI","131":"/PromptingTools.jl/previews/PR218/prompts/agents#Template:-CodeFixerShort","132":"/PromptingTools.jl/previews/PR218/prompts/agents#Template:-CodeFixerTiny","133":"/PromptingTools.jl/previews/PR218/prompts/agents#Feedback-Templates","134":"/PromptingTools.jl/previews/PR218/prompts/agents#Template:-FeedbackFromEvaluator","135":"/PromptingTools.jl/previews/PR218/prompts/classification#Classification-Templates","136":"/PromptingTools.jl/previews/PR218/prompts/classification#Template:-InputClassifier","137":"/PromptingTools.jl/previews/PR218/prompts/classification#Template:-JudgeIsItTrue","138":"/PromptingTools.jl/previews/PR218/prompts/classification#Template:-QuestionRouter","139":"/PromptingTools.jl/previews/PR218/prompts/critic#Critic-Templates","140":"/PromptingTools.jl/previews/PR218/prompts/critic#Template:-ChiefEditorTranscriptCritic","141":"/PromptingTools.jl/previews/PR218/prompts/critic#Template:-GenericTranscriptCritic","142":"/PromptingTools.jl/previews/PR218/prompts/critic#Template:-JuliaExpertTranscriptCritic","143":"/PromptingTools.jl/previews/PR218/prompts/extraction#Xml-Formatted-Templates","144":"/PromptingTools.jl/previews/PR218/prompts/extraction#Template:-ExtractDataCoTXML","145":"/PromptingTools.jl/previews/PR218/prompts/extraction#Template:-ExtractDataXML","146":"/PromptingTools.jl/previews/PR218/prompts/extraction#Extraction-Templates","147":"/PromptingTools.jl/previews/PR218/prompts/extraction#Template:-ExtractData","148":"/PromptingTools.jl/previews/PR218/prompts/general#General-Templates","149":"/PromptingTools.jl/previews/PR218/prompts/general#Template:-BlankSystemUser","150":"/PromptingTools.jl/previews/PR218/prompts/general#Template:-PromptEngineerForTask","151":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Persona-Task-Templates","152":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-AnalystChaptersInTranscript","153":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-AnalystDecisionsInTranscript","154":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-AnalystTheme
sInResponses","155":"/PromptingTools.jl/previews/PR218/prompts/persona-task#theme-1-theme-description","156":"/PromptingTools.jl/previews/PR218/prompts/persona-task#theme-2-theme-description","157":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-AssistantAsk","158":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-ConversationLabeler","159":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-DetailOrientedTask","160":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-DrafterEmailBrief","161":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-GenericTopicExpertAsk","162":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-GenericWriter","163":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JavaScriptExpertAsk","164":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JuliaBlogWriter","165":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JuliaExpertAsk","166":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JuliaExpertCoTTask","167":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JuliaExpertTestCode","168":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JuliaRecapCoTTask","169":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JuliaRecapTask","170":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-LinuxBashExpertAsk","171":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-StorytellerExplainSHAP","172":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Xml-Formatted-Templates","173":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JuliaExpertAskXML","174":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JuliaExpertCoTTaskXML","175":"/PromptingTools.jl/previews/PR218/prompts/persona-task#Template:-JuliaExpertTestCodeXML","176":"/PromptingTools.jl/previews/PR218/prompts/visual#Visual-Templates","177":"/PromptingTools.jl/previews/PR218/prompts/visual#Template:-BlogTitleImageGenerator","178":"/PromptingTools.jl/previews/PR218/prompts/visual#Template:-OCRTask","179":"/PromptingTools.jl/previews/PR218/reference_apitools#Reference-for-APITools","180":"/PromptingTools.jl/previews/PR218/reference_experimental#Reference-for-Experimental-Module","181":"/PromptingTools.jl/previews/PR218/reference_agenttools#Reference-for-AgentTools","182":"/PromptingTools.jl/previews/PR218/reference_ragtools#Reference-for-RAGTools","183":"/PromptingTools.jl/previews/PR218/reference#Reference"},"fieldIds":{"title":0,"titles":1,"text":2},"fieldLength":{"0":[4,1,165],"1":[10,1,78],"2":[4,10,197],"3":[1,1,40],"4":[5,1,74],"5":[6,1,68],"6":[6,1,180],"7":[4,1,366],"8":[6,1,111],"9":[2,1,322],"10":[2,1,1],"11":[3,2,306],"12":[5,2,128],"13":[3,2,152],"14":[2,2,161],"15":[2,2,40],"16":[2,2,97],"17":[1,2,65],"18":[1,2,97],"19":[4,3,81],"20":[2,2,158],"21":[4,2,103],"22":[8,2,274],"23":[3,2,108],"24":[8,2,195],"25":[2,1,27],"26":[2,2,145],"27":[5,2,87],"28":[4,2,118],"29":[4,2,85],"30":[3,2,109],"31":[3,2,162],"32":[5,1,83],"33":[4,5,14],"34":[2,8,55],"35":[2,8,90],"36":[1,8,59],"37":[5,1,147],"38":[4,5,1],"39":[2,8,51],"40":[3,8,35],"41":[2,8,122],"42":[4,8,134],"43":[4,5,40],"44":[3,5,1],"45":[5,7,42],"46":[3,7,53],"47":[3,7,61],"48":[2,1,23],"49":[1,2,41],"50":[1,2,87],"51":[3,1,37],"52":[1,3,185],"53":[1,3,1],"54":[5,4,227],"55":[1,3,920],"56":[3,1,100],"57":[1,3,91],"58":[1,3,375],"59":[2,3,1],"60":[2,4,195],"61":[2,4,79],"62":[3,5,105],"63":[1,4,165]
,"64":[1,3,779],"65":[2,1,28],"66":[1,2,126],"67":[1,2,513],"68":[2,1,1],"69":[1,2,112],"70":[1,2,37],"71":[5,2,112],"72":[4,2,101],"73":[3,1,1],"74":[2,3,54],"75":[7,5,36],"76":[4,3,65],"77":[4,3,54],"78":[19,3,120],"79":[10,3,151],"80":[9,3,87],"81":[4,3,57],"82":[10,3,99],"83":[14,3,118],"84":[7,3,97],"85":[7,3,41],"86":[10,3,8],"87":[4,3,47],"88":[3,3,31],"89":[4,3,108],"90":[6,3,83],"91":[8,3,111],"92":[7,3,120],"93":[6,3,256],"94":[8,3,193],"95":[8,3,70],"96":[8,3,87],"97":[14,3,155],"98":[3,3,141],"99":[3,1,49],"100":[2,3,91],"101":[3,5,56],"102":[1,5,77],"103":[1,5,61],"104":[1,5,77],"105":[2,5,139],"106":[3,5,315],"107":[4,3,203],"108":[4,3,451],"109":[3,1,1],"110":[2,3,61],"111":[2,1,1],"112":[2,2,90],"113":[2,1,1],"114":[2,2,160],"115":[2,2,66],"116":[2,1,1],"117":[2,2,111],"118":[2,2,119],"119":[2,1,1],"120":[2,2,140],"121":[2,2,116],"122":[2,2,63],"123":[3,1,1],"124":[2,3,85],"125":[2,3,83],"126":[2,3,122],"127":[2,3,85],"128":[2,3,65],"129":[3,1,1],"130":[2,3,236],"131":[2,3,126],"132":[2,3,60],"133":[2,1,1],"134":[2,2,23],"135":[2,1,1],"136":[2,2,73],"137":[2,2,41],"138":[2,2,101],"139":[2,1,1],"140":[2,2,188],"141":[2,2,136],"142":[2,2,178],"143":[3,1,1],"144":[2,3,101],"145":[2,3,83],"146":[2,1,1],"147":[2,2,74],"148":[2,1,1],"149":[2,2,35],"150":[2,2,71],"151":[3,1,1],"152":[2,3,198],"153":[2,3,207],"154":[2,3,124],"155":[4,1,5],"156":[4,1,36],"157":[2,4,47],"158":[2,4,108],"159":[2,4,46],"160":[2,4,160],"161":[2,4,65],"162":[2,4,65],"163":[2,4,63],"164":[2,4,118],"165":[2,4,51],"166":[2,4,85],"167":[2,4,171],"168":[2,4,168],"169":[2,4,174],"170":[2,4,66],"171":[2,4,175],"172":[3,4,1],"173":[2,6,60],"174":[2,6,96],"175":[2,6,181],"176":[2,1,1],"177":[2,2,76],"178":[2,2,51],"179":[3,1,101],"180":[4,1,68],"181":[3,1,1183],"182":[3,1,1818],"183":[1,1,2819]},"averageFieldLength":[3.25,2.869565217391304,136.58152173913044],"storedFields":{"0":{"title":"Coverage of Model Providers","titles":[]},"1":{"title":"Building a Simple Retrieval-Augmented Generation (RAG) System with RAGTools","titles":[]},"2":{"title":"RAG in Two Lines","titles":["Building a Simple Retrieval-Augmented Generation (RAG) System with RAGTools"]},"3":{"title":"Evaluations","titles":[]},"4":{"title":"Generate Q&A pairs","titles":["Evaluations"]},"5":{"title":"Explore one Q&A pair","titles":["Evaluations"]},"6":{"title":"Evaluate this Q&A pair","titles":["Evaluations"]},"7":{"title":"Evaluate the Whole Set","titles":["Evaluations"]},"8":{"title":"What would we do next?","titles":[]},"9":{"title":"Using AITemplates","titles":[]},"10":{"title":"Various Examples","titles":[]},"11":{"title":"ai* Functions Overview","titles":["Various Examples"]},"12":{"title":"Seamless Integration Into Your Workflow","titles":["Various Examples"]},"13":{"title":"Advanced Prompts / Conversations","titles":["Various Examples"]},"14":{"title":"Templated Prompts","titles":["Various Examples"]},"15":{"title":"Asynchronous Execution","titles":["Various Examples"]},"16":{"title":"Model Aliases","titles":["Various Examples"]},"17":{"title":"Embeddings","titles":["Various Examples"]},"18":{"title":"Classification","titles":["Various Examples"]},"19":{"title":"Routing to Defined Categories","titles":["Various Examples","Classification"]},"20":{"title":"Data Extraction","titles":["Various Examples"]},"21":{"title":"OCR and Image Comprehension","titles":["Various Examples"]},"22":{"title":"Experimental Agent Workflows / Output Validation with airetry!","titles":["Various Examples"]},"23":{"title":"Using Ollama models","titles":["Various 
Examples"]},"24":{"title":"Using MistralAI API and other OpenAI-compatible APIs","titles":["Various Examples"]},"25":{"title":"Custom APIs","titles":[]},"26":{"title":"Using MistralAI","titles":["Custom APIs"]},"27":{"title":"Using other OpenAI-compatible APIs","titles":["Custom APIs"]},"28":{"title":"Using llama.cpp server","titles":["Custom APIs"]},"29":{"title":"Using Databricks Foundation Models","titles":["Custom APIs"]},"30":{"title":"Using Together.ai","titles":["Custom APIs"]},"31":{"title":"Using Fireworks.ai","titles":["Custom APIs"]},"32":{"title":"Working with Google AI Studio","titles":[]},"33":{"title":"Text Generation with aigenerate","titles":["Working with Google AI Studio"]},"34":{"title":"Simple message","titles":["Working with Google AI Studio","Text Generation with aigenerate"]},"35":{"title":"Advanced Prompts","titles":["Working with Google AI Studio","Text Generation with aigenerate"]},"36":{"title":"Gotchas","titles":["Working with Google AI Studio","Text Generation with aigenerate"]},"37":{"title":"Local models with Ollama.ai","titles":[]},"38":{"title":"Text Generation with aigenerate","titles":["Local models with Ollama.ai"]},"39":{"title":"Simple message","titles":["Local models with Ollama.ai","Text Generation with aigenerate"]},"40":{"title":"Standard string interpolation","titles":["Local models with Ollama.ai","Text Generation with aigenerate"]},"41":{"title":"Advanced Prompts","titles":["Local models with Ollama.ai","Text Generation with aigenerate"]},"42":{"title":"Schema Changes / Custom models","titles":["Local models with Ollama.ai","Text Generation with aigenerate"]},"43":{"title":"Providing Images with aiscan","titles":["Local models with Ollama.ai"]},"44":{"title":"Embeddings with aiembed","titles":["Local models with Ollama.ai"]},"45":{"title":"Simple embedding for one document","titles":["Local models with Ollama.ai","Embeddings with aiembed"]},"46":{"title":"Multiple documents embedding","titles":["Local models with Ollama.ai","Embeddings with aiembed"]},"47":{"title":"Using postprocessing function","titles":["Local models with Ollama.ai","Embeddings with aiembed"]},"48":{"title":"APITools Introduction","titles":[]},"49":{"title":"Highlights","titles":["APITools Introduction"]},"50":{"title":"References","titles":["APITools Introduction"]},"51":{"title":"Agent Tools Introduction","titles":[]},"52":{"title":"Highlights","titles":["Agent Tools Introduction"]},"53":{"title":"Examples","titles":["Agent Tools Introduction"]},"54":{"title":"Automatic Fixing of AI Calls","titles":["Agent Tools Introduction","Examples"]},"55":{"title":"References","titles":["Agent Tools Introduction"]},"56":{"title":"RAG Tools Introduction","titles":[]},"57":{"title":"Highlights","titles":["RAG Tools Introduction"]},"58":{"title":"Examples","titles":["RAG Tools Introduction"]},"59":{"title":"RAG Interface","titles":["RAG Tools Introduction"]},"60":{"title":"System Overview","titles":["RAG Tools Introduction","RAG Interface"]},"61":{"title":"RAG Diagram","titles":["RAG Tools Introduction","RAG Interface"]},"62":{"title":"Passing Keyword Arguments","titles":["RAG Tools Introduction","RAG Interface","RAG Diagram"]},"63":{"title":"Deepdive","titles":["RAG Tools Introduction","RAG Interface"]},"64":{"title":"References","titles":["RAG Tools Introduction"]},"65":{"title":"Text Utilities","titles":[]},"66":{"title":"Highlights","titles":["Text Utilities"]},"67":{"title":"References","titles":["Text Utilities"]},"68":{"title":"Getting 
Started","titles":[]},"69":{"title":"Prerequisites","titles":["Getting Started"]},"70":{"title":"Installation","titles":["Getting Started"]},"71":{"title":"Quick Start with @ai_str","titles":["Getting Started"]},"72":{"title":"Using aigenerate with placeholders","titles":["Getting Started"]},"73":{"title":"Frequently Asked Questions","titles":[]},"74":{"title":"Why OpenAI","titles":["Frequently Asked Questions"]},"75":{"title":"What if I cannot access OpenAI?","titles":["Frequently Asked Questions","Why OpenAI"]},"76":{"title":"Data Privacy and OpenAI","titles":["Frequently Asked Questions"]},"77":{"title":"Creating OpenAI API Key","titles":["Frequently Asked Questions"]},"78":{"title":"Getting an error "ArgumentError: api_key cannot be empty" despite having set OPENAI_API_KEY? {#Getting-an-error-"ArgumentError:-apikey-cannot-be-empty"-despite-having-set-OPENAIAPI_KEY?}","titles":["Frequently Asked Questions"]},"79":{"title":"Getting an error "Rate limit exceeded" from OpenAI?","titles":["Frequently Asked Questions"]},"80":{"title":"Getting the error "429 Too Many Requests"?","titles":["Frequently Asked Questions"]},"81":{"title":"Setting OpenAI Spending Limits","titles":["Frequently Asked Questions"]},"82":{"title":"How much does it cost? Is it worth paying for?","titles":["Frequently Asked Questions"]},"83":{"title":"How to try new OpenAI models if I\'m not Tier 5 customer?","titles":["Frequently Asked Questions"]},"84":{"title":"Configuring the Environment Variable for API Key","titles":["Frequently Asked Questions"]},"85":{"title":"Setting the API Key via Preferences.jl","titles":["Frequently Asked Questions"]},"86":{"title":"Understanding the API Keyword Arguments in aigenerate (api_kwargs)","titles":["Frequently Asked Questions"]},"87":{"title":"Instant Access from Anywhere","titles":["Frequently Asked Questions"]},"88":{"title":"Open Source Alternatives","titles":["Frequently Asked Questions"]},"89":{"title":"Setup Guide for Ollama","titles":["Frequently Asked Questions"]},"90":{"title":"Changing the Default Model or Schema","titles":["Frequently Asked Questions"]},"91":{"title":"Using Custom API Providers like Azure or Databricks","titles":["Frequently Asked Questions"]},"92":{"title":"How to have Multi-turn Conversations?","titles":["Frequently Asked Questions"]},"93":{"title":"How to have typed responses?","titles":["Frequently Asked Questions"]},"94":{"title":"How to quickly create a prompt template?","titles":["Frequently Asked Questions"]},"95":{"title":"Do we have a RecursiveCharacterTextSplitter like Langchain?","titles":["Frequently Asked Questions"]},"96":{"title":"How would I fine-tune a model?","titles":["Frequently Asked Questions"]},"97":{"title":"Can I see how my prompt is rendered / what is sent to the API?","titles":["Frequently Asked Questions"]},"98":{"title":"Automatic Logging / Tracing","titles":["Frequently Asked Questions"]},"99":{"title":"How It Works","titles":[]},"100":{"title":"Key Concepts","titles":["How It Works"]},"101":{"title":"API/Model Providers","titles":["How It Works","Key Concepts"]},"102":{"title":"Schemas","titles":["How It Works","Key Concepts"]},"103":{"title":"Prompts","titles":["How It Works","Key Concepts"]},"104":{"title":"Messages","titles":["How It Works","Key Concepts"]},"105":{"title":"Prompt Templates","titles":["How It Works","Key Concepts"]},"106":{"title":"ai* Functions Overview","titles":["How It Works","Key Concepts"]},"107":{"title":"Walkthrough Example for aigenerate","titles":["How It Works"]},"108":{"title":"Walkthrough 
Example for aiextract","titles":["How It Works"]},"109":{"title":"Basic-Rag Templates","titles":[]},"110":{"title":"Template: RAGAnswerFromContext","titles":["Basic-Rag Templates"]},"111":{"title":"Ranking Templates","titles":[]},"112":{"title":"Template: RAGRankGPT","titles":["Ranking Templates"]},"113":{"title":"Metadata Templates","titles":[]},"114":{"title":"Template: RAGExtractMetadataLong","titles":["Metadata Templates"]},"115":{"title":"Template: RAGExtractMetadataShort","titles":["Metadata Templates"]},"116":{"title":"Refinement Templates","titles":[]},"117":{"title":"Template: RAGAnswerRefiner","titles":["Refinement Templates"]},"118":{"title":"Template: RAGWebSearchRefiner","titles":["Refinement Templates"]},"119":{"title":"Evaluation Templates","titles":[]},"120":{"title":"Template: RAGCreateQAFromContext","titles":["Evaluation Templates"]},"121":{"title":"Template: RAGJudgeAnswerFromContext","titles":["Evaluation Templates"]},"122":{"title":"Template: RAGJudgeAnswerFromContextShort","titles":["Evaluation Templates"]},"123":{"title":"Query-Transformations Templates","titles":[]},"124":{"title":"Template: RAGJuliaQueryHyDE","titles":["Query-Transformations Templates"]},"125":{"title":"Template: RAGQueryHyDE","titles":["Query-Transformations Templates"]},"126":{"title":"Template: RAGQueryKeywordExpander","titles":["Query-Transformations Templates"]},"127":{"title":"Template: RAGQueryOptimizer","titles":["Query-Transformations Templates"]},"128":{"title":"Template: RAGQuerySimplifier","titles":["Query-Transformations Templates"]},"129":{"title":"Code-Fixing Templates","titles":[]},"130":{"title":"Template: CodeFixerRCI","titles":["Code-Fixing Templates"]},"131":{"title":"Template: CodeFixerShort","titles":["Code-Fixing Templates"]},"132":{"title":"Template: CodeFixerTiny","titles":["Code-Fixing Templates"]},"133":{"title":"Feedback Templates","titles":[]},"134":{"title":"Template: FeedbackFromEvaluator","titles":["Feedback Templates"]},"135":{"title":"Classification Templates","titles":[]},"136":{"title":"Template: InputClassifier","titles":["Classification Templates"]},"137":{"title":"Template: JudgeIsItTrue","titles":["Classification Templates"]},"138":{"title":"Template: QuestionRouter","titles":["Classification Templates"]},"139":{"title":"Critic Templates","titles":[]},"140":{"title":"Template: ChiefEditorTranscriptCritic","titles":["Critic Templates"]},"141":{"title":"Template: GenericTranscriptCritic","titles":["Critic Templates"]},"142":{"title":"Template: JuliaExpertTranscriptCritic","titles":["Critic Templates"]},"143":{"title":"Xml-Formatted Templates","titles":[]},"144":{"title":"Template: ExtractDataCoTXML","titles":["Xml-Formatted Templates"]},"145":{"title":"Template: ExtractDataXML","titles":["Xml-Formatted Templates"]},"146":{"title":"Extraction Templates","titles":[]},"147":{"title":"Template: ExtractData","titles":["Extraction Templates"]},"148":{"title":"General Templates","titles":[]},"149":{"title":"Template: BlankSystemUser","titles":["General Templates"]},"150":{"title":"Template: PromptEngineerForTask","titles":["General Templates"]},"151":{"title":"Persona-Task Templates","titles":[]},"152":{"title":"Template: AnalystChaptersInTranscript","titles":["Persona-Task Templates"]},"153":{"title":"Template: AnalystDecisionsInTranscript","titles":["Persona-Task Templates"]},"154":{"title":"Template: AnalystThemesInResponses","titles":["Persona-Task Templates"]},"155":{"title":"Theme 1: [Theme Description]","titles":[]},"156":{"title":"Theme 2: [Theme 
Description]","titles":[]},"157":{"title":"Template: AssistantAsk","titles":["Theme 2: [Theme Description]"]},"158":{"title":"Template: ConversationLabeler","titles":["Theme 2: [Theme Description]"]},"159":{"title":"Template: DetailOrientedTask","titles":["Theme 2: [Theme Description]"]},"160":{"title":"Template: DrafterEmailBrief","titles":["Theme 2: [Theme Description]"]},"161":{"title":"Template: GenericTopicExpertAsk","titles":["Theme 2: [Theme Description]"]},"162":{"title":"Template: GenericWriter","titles":["Theme 2: [Theme Description]"]},"163":{"title":"Template: JavaScriptExpertAsk","titles":["Theme 2: [Theme Description]"]},"164":{"title":"Template: JuliaBlogWriter","titles":["Theme 2: [Theme Description]"]},"165":{"title":"Template: JuliaExpertAsk","titles":["Theme 2: [Theme Description]"]},"166":{"title":"Template: JuliaExpertCoTTask","titles":["Theme 2: [Theme Description]"]},"167":{"title":"Template: JuliaExpertTestCode","titles":["Theme 2: [Theme Description]"]},"168":{"title":"Template: JuliaRecapCoTTask","titles":["Theme 2: [Theme Description]"]},"169":{"title":"Template: JuliaRecapTask","titles":["Theme 2: [Theme Description]"]},"170":{"title":"Template: LinuxBashExpertAsk","titles":["Theme 2: [Theme Description]"]},"171":{"title":"Template: StorytellerExplainSHAP","titles":["Theme 2: [Theme Description]"]},"172":{"title":"Xml-Formatted Templates","titles":["Theme 2: [Theme Description]"]},"173":{"title":"Template: JuliaExpertAskXML","titles":["Theme 2: [Theme Description]","Xml-Formatted Templates"]},"174":{"title":"Template: JuliaExpertCoTTaskXML","titles":["Theme 2: [Theme Description]","Xml-Formatted Templates"]},"175":{"title":"Template: JuliaExpertTestCodeXML","titles":["Theme 2: [Theme Description]","Xml-Formatted Templates"]},"176":{"title":"Visual Templates","titles":[]},"177":{"title":"Template: BlogTitleImageGenerator","titles":["Visual Templates"]},"178":{"title":"Template: OCRTask","titles":["Visual Templates"]},"179":{"title":"Reference for APITools","titles":[]},"180":{"title":"Reference for Experimental Module","titles":[]},"181":{"title":"Reference for AgentTools","titles":[]},"182":{"title":"Reference for 
RAGTools","titles":[]},"183":{"title":"Reference","titles":[]}},"dirtCount":0,"index":[["θ",{"2":{"181":1}}],["β",{"2":{"181":1}}],["α",{"2":{"181":2}}],["→",{"2":{"108":1}}],["zoom",{"2":{"108":1}}],["zshrc",{"2":{"84":1}}],["zero",{"2":{"67":1,"167":1,"175":1,"183":4}}],["~300",{"2":{"183":3}}],["~0",{"2":{"82":1}}],["~",{"2":{"78":1,"84":1,"87":1}}],["~word",{"2":{"64":1,"182":1}}],["~words",{"2":{"64":1,"182":1}}],["^",{"2":{"67":2,"183":2}}],["÷",{"2":{"55":1,"64":1,"181":1,"182":1}}],["├─",{"2":{"55":9,"181":11}}],["👋",{"2":{"183":1}}],["😊",{"2":{"42":1}}],["😃",{"2":{"2":1,"55":1,"181":1}}],["905",{"2":{"183":1}}],["909",{"2":{"158":1}}],["93",{"2":{"181":1}}],["911",{"2":{"108":2}}],["911t",{"2":{"93":2}}],["94",{"2":{"55":1,"181":1}}],["9999999999999982",{"2":{"47":1}}],["99",{"2":{"28":1,"181":1}}],["9",{"2":{"23":1,"24":1,"26":1,"31":1,"182":4,"183":2}}],["9examples",{"2":{"7":1}}],["|im",{"2":{"183":4}}],["|",{"2":{"22":2,"54":2,"55":1,"181":1}}],["|>",{"2":{"9":1,"14":3,"21":1,"54":1,"55":4,"67":1,"93":2,"98":4,"106":1,"181":5,"182":1,"183":16}}],["y`",{"2":{"168":1,"169":1}}],["yarrr",{"2":{"94":2,"183":2}}],["yay",{"2":{"55":1,"181":1}}],["y",{"2":{"55":2,"87":1,"181":4,"182":2,"183":2}}],["years",{"2":{"158":1}}],["yes",{"2":{"41":1,"72":1,"95":1,"97":1}}],["yedi",{"2":{"35":1,"41":2,"183":5}}],["yet",{"2":{"32":1,"55":3,"83":1,"107":1,"141":1,"160":1,"180":1,"181":2,"182":2,"183":6}}],["yellow",{"2":{"22":2,"54":3,"55":5,"181":5}}],["york",{"2":{"183":10}}],["yoda",{"2":{"13":2,"35":1,"41":1,"183":5}}],["youtube",{"2":{"152":1,"153":1}}],["young",{"2":{"13":1,"35":1,"41":1,"183":1}}],["yours",{"2":{"183":13}}],["yourself",{"2":{"41":2,"183":1}}],["your",{"0":{"12":1},"2":{"2":4,"4":1,"8":1,"9":9,"11":1,"12":3,"13":1,"14":3,"16":1,"23":1,"24":3,"26":1,"27":1,"28":1,"29":1,"32":2,"35":2,"37":2,"41":1,"42":1,"55":3,"56":1,"57":1,"60":1,"61":1,"64":5,"66":2,"69":5,"70":1,"71":2,"76":5,"77":1,"78":2,"79":2,"80":4,"81":1,"82":1,"83":2,"84":6,"85":2,"87":1,"89":2,"90":3,"91":4,"92":2,"94":2,"96":1,"97":1,"98":2,"100":1,"102":1,"104":1,"105":4,"106":1,"107":4,"108":3,"117":1,"118":1,"121":1,"124":1,"125":1,"127":2,"130":6,"131":1,"136":1,"138":1,"140":1,"141":2,"142":1,"144":1,"152":3,"153":4,"154":1,"157":2,"158":1,"159":1,"160":1,"161":5,"163":5,"165":2,"166":4,"167":3,"170":5,"171":2,"173":2,"174":4,"175":3,"181":3,"182":13,"183":54}}],["you",{"2":{"0":1,"1":2,"2":3,"4":2,"5":1,"7":6,"8":1,"9":32,"11":9,"12":9,"13":11,"14":7,"15":2,"16":2,"17":2,"18":3,"19":1,"20":6,"21":4,"22":6,"23":5,"24":12,"25":1,"26":9,"27":3,"28":5,"29":5,"30":9,"31":9,"32":2,"33":1,"34":4,"35":3,"37":4,"39":6,"40":2,"41":10,"42":9,"43":1,"46":3,"49":1,"50":1,"52":1,"54":4,"55":25,"56":1,"57":2,"58":7,"60":7,"62":4,"64":19,"66":2,"67":5,"69":5,"70":1,"71":3,"72":3,"74":1,"76":2,"77":1,"78":6,"79":11,"80":5,"81":3,"82":9,"83":4,"84":5,"85":2,"87":1,"88":2,"89":7,"90":5,"91":6,"92":4,"93":5,"94":21,"96":2,"97":3,"98":12,"100":4,"101":3,"102":1,"103":3,"104":1,"105":8,"106":8,"107":8,"108":19,"110":2,"112":1,"114":2,"115":2,"117":3,"118":3,"120":1,"126":2,"128":1,"130":4,"131":2,"132":2,"136":2,"137":1,"138":3,"140":1,"152":3,"153":1,"154":1,"156":1,"157":2,"159":1,"160":1,"161":1,"162":3,"163":1,"164":4,"165":2,"166":3,"167":3,"169":1,"170":1,"171":1,"173":2,"174":2,"175":3,"179":1,"181":24,"182":36,"183":211}}],["└─",{"2":{"55":9,"181":13}}],["└",{"2":{"12":1}}],["┌",{"2":{"12":1}}],["70",{"2":{"183":7}}],["70b",{"2":{"29":3}}],["72",{"2":{"183":6}}],["74",{"2":{"72":1}}],["754",{"2":{"138":1}}],["75",{"2":
{"64":1,"182":1}}],["77",{"2":{"55":1,"181":1}}],["786",{"2":{"131":1}}],["78",{"2":{"31":1}}],["787",{"2":{"17":1,"183":1}}],["7",{"2":{"12":1,"55":7,"58":1,"160":1,"181":8,"182":4,"183":2}}],["7examples",{"2":{"7":1}}],["`1",{"2":{"183":1}}],["`1+1`",{"2":{"183":5}}],["`2`",{"2":{"183":5}}],["`top",{"2":{"182":1}}],["`test",{"2":{"64":1,"182":1}}],["`textchunker",{"2":{"64":1,"182":1}}],["`build",{"2":{"182":1}}],["`begin`",{"2":{"168":1,"169":1}}],["`empty",{"2":{"183":2}}],["`error`",{"2":{"183":2}}],["`end`",{"2":{"168":1,"169":1}}],["`example`",{"2":{"9":2}}],["`$`",{"2":{"168":1,"169":1}}],["`$a+$a`",{"2":{"40":1,"183":6}}],["`while`",{"2":{"168":1,"169":1}}],["`function`",{"2":{"168":1,"169":1}}],["`function",{"2":{"168":1,"169":1}}],["`for`",{"2":{"168":1,"169":1}}],["`false`",{"2":{"108":1}}],["`fahrenheit`",{"2":{"20":1}}],["`image",{"2":{"183":2}}],["`isx",{"2":{"168":1,"169":1}}],["`if",{"2":{"168":1,"169":1}}],["`index`",{"2":{"64":2,"182":3}}],["`innerjoin`",{"2":{"7":1}}],["`x",{"2":{"168":2,"169":2}}],["`other",{"2":{"153":1}}],["`out",{"2":{"55":1,"181":1}}],["`dict",{"2":{"168":1,"169":1}}],["`distributed`",{"2":{"58":1}}],["`data`",{"2":{"147":1}}],["`register",{"2":{"183":1}}],["`return",{"2":{"106":1}}],["`run",{"2":{"181":1}}],["`ragresult`",{"2":{"64":1,"182":1}}],["`you",{"2":{"104":1}}],["`streamcallback",{"2":{"183":3}}],["`score",{"2":{"182":1}}],["`schema",{"2":{"108":1}}],["`schema`",{"2":{"27":1,"28":1}}],["`success",{"2":{"55":1,"181":1}}],["`maybeextract",{"2":{"183":1}}],["`map`",{"2":{"79":1}}],["`model",{"2":{"183":1}}],["`model`",{"2":{"28":1}}],["`message`",{"2":{"183":2}}],["`msg",{"2":{"37":1}}],["`processor`",{"2":{"182":1}}],["`pt",{"2":{"27":1,"28":1}}],["`pkg`",{"2":{"9":1}}],["`local",{"2":{"183":1}}],["`location`",{"2":{"20":1}}],["`last",{"2":{"22":2,"54":2,"55":2,"181":2}}],["`number`",{"2":{"168":1,"169":1}}],["`nothing`",{"2":{"108":1}}],["`n",{"2":{"22":1,"54":1,"55":1,"181":1}}],["`condition`",{"2":{"108":1}}],["`convert`",{"2":{"108":1}}],["`conversation`",{"2":{"55":1,"106":1,"181":1}}],["`config",{"2":{"55":1,"181":1}}],["`config`",{"2":{"22":1,"54":1,"55":1,"181":1}}],["`celsius`",{"2":{"20":1}}],["`usermessage`",{"2":{"55":1,"181":1}}],["`using`",{"2":{"9":1}}],["`unit`",{"2":{"20":1}}],["``",{"2":{"14":1,"183":1}}],["```plaintext",{"2":{"131":1,"132":1}}],["````",{"2":{"55":1,"181":1}}],["```sql",{"2":{"21":1,"183":2}}],["```",{"2":{"9":2,"12":2,"55":1,"121":1,"122":1,"130":1,"131":1,"132":1,"152":2,"153":2,"167":2,"175":2,"181":2,"182":1}}],["```julia",{"2":{"9":2,"67":1,"130":2,"131":1,"167":2,"175":2,"181":1,"182":1,"183":3}}],["`abstractstring`",{"2":{"168":1,"169":1}}],["`a",{"2":{"168":1,"169":1}}],["`answerer",{"2":{"62":1}}],["`answerer`",{"2":{"62":1}}],["`answer",{"2":{"62":1}}],["`aigenerate",{"2":{"55":1,"181":1}}],["`aicall`",{"2":{"22":1,"54":1,"55":2,"181":2}}],["`airag`",{"2":{"6":1,"64":1,"182":1}}],["`api",{"2":{"22":1,"37":1,"54":1,"55":1,"181":1}}],["`ask`",{"2":{"9":2,"14":1,"183":2}}],["`add`",{"2":{"9":1,"167":1,"175":1}}],["`",{"2":{"9":2,"12":2,"22":1,"27":1,"28":1,"37":2,"54":1,"55":2,"58":1,"62":1,"64":1,"108":2,"130":2,"164":2,"167":6,"168":8,"169":8,"175":6,"181":3,"182":1,"183":6}}],["│",{"2":{"7":12,"12":5,"55":14,"181":16}}],["$date",{"2":{"183":2}}],["$location",{"2":{"183":2}}],["$lower",{"2":{"55":1,"181":1}}],["$f",{"2":{"108":1}}],["$25",{"2":{"108":1}}],["$10",{"2":{"81":1}}],["$50",{"2":{"115":1}}],["$5",{"2":{"81":1}}],["$user",{"2":{"55":1,"181":1}}],["$upper",{"2":{"55":1,"181":1}}],["$",{"2"
:{"7":3,"55":9,"67":7,"71":1,"93":1,"108":1,"181":9,"183":7}}],["$0",{"2":{"4":1,"12":1,"21":2,"24":1,"26":1,"30":1,"31":1,"58":1,"71":2,"72":1,"92":1,"183":5}}],[">0",{"2":{"182":1}}],[">tryparse",{"2":{"93":1}}],[">x",{"2":{"7":2,"182":1}}],[">",{"2":{"7":1,"14":1,"19":3,"22":1,"31":1,"46":1,"54":1,"55":8,"58":1,"62":1,"64":1,"67":5,"71":1,"108":2,"112":2,"130":2,"152":2,"153":3,"168":1,"169":1,"181":9,"182":8,"183":28}}],["x123",{"2":{"183":2}}],["x^2`",{"2":{"168":1,"169":1}}],["xml",{"0":{"143":1,"172":1},"1":{"144":1,"145":1,"173":1,"174":1,"175":1},"2":{"144":1,"145":1,"173":1,"174":1,"175":1,"183":1}}],["x3c",{"2":{"21":1,"27":1,"55":6,"58":1,"60":1,"64":5,"67":13,"93":5,"130":4,"144":4,"145":4,"152":3,"153":3,"160":2,"167":1,"173":2,"174":12,"175":17,"181":11,"182":116,"183":163}}],["xyz",{"2":{"12":3,"55":1,"82":1,"106":2,"181":3,"182":1}}],["x",{"2":{"7":4,"22":2,"46":2,"54":2,"55":6,"79":4,"87":1,"93":2,"168":2,"169":2,"181":17,"182":11,"183":11}}],["x26",{"2":{"4":1,"67":2,"120":1,"182":2,"183":12}}],["08",{"2":{"183":2}}],["02",{"2":{"183":1}}],["024",{"2":{"7":1}}],["07",{"2":{"183":1}}],["03",{"2":{"183":5}}],["05",{"2":{"181":1,"183":7}}],["0s",{"2":{"181":1}}],["0011",{"2":{"183":1}}],["0015",{"2":{"183":3}}],["002",{"2":{"183":3}}],["000",{"2":{"64":1,"67":2,"79":3,"182":10,"183":2}}],["0001",{"2":{"30":1,"31":1,"71":1,"72":1,"82":2}}],["0002",{"2":{"12":1}}],["0045",{"2":{"21":1,"183":1}}],["0117",{"2":{"21":1,"183":2}}],["014",{"2":{"7":7}}],["015",{"2":{"7":2}}],["0dict",{"2":{"7":3}}],["0",{"2":{"6":2,"11":2,"17":2,"20":1,"23":1,"24":2,"26":2,"31":1,"32":1,"33":1,"42":1,"47":2,"55":8,"58":6,"64":24,"66":2,"67":4,"71":2,"82":1,"89":2,"92":2,"94":2,"106":1,"110":1,"115":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"131":1,"132":1,"134":1,"138":1,"140":1,"141":1,"142":1,"144":1,"145":1,"158":1,"161":1,"162":1,"163":1,"164":1,"166":1,"167":3,"169":1,"170":1,"171":1,"174":1,"175":4,"177":1,"181":29,"182":84,"183":66}}],["3rd",{"2":{"183":7}}],["39931",{"2":{"181":2}}],["390",{"2":{"124":1}}],["383",{"2":{"162":1}}],["31",{"2":{"71":1}}],["344",{"2":{"163":1}}],["34900",{"2":{"67":2,"183":2}}],["34",{"2":{"58":1,"183":1}}],["34315",{"2":{"55":1,"181":1}}],["374",{"2":{"170":1}}],["375",{"2":{"110":1}}],["37581",{"2":{"55":1,"181":1}}],["37",{"2":{"55":1,"181":1}}],["354",{"2":{"125":1}}],["35",{"2":{"55":4,"67":2,"181":4,"183":2}}],["35603",{"2":{"55":1,"181":1}}],["32000",{"2":{"181":3}}],["32991",{"2":{"55":5,"181":5}}],["32",{"2":{"55":2,"71":1,"181":2}}],["337",{"2":{"161":1}}],["33",{"2":{"55":5,"181":5}}],["33333dict",{"2":{"7":1}}],["3x",{"2":{"55":1,"181":1}}],["366",{"2":{"136":1}}],["36",{"2":{"55":1,"181":1}}],["362",{"2":{"21":1,"183":1}}],["3000",{"2":{"167":1,"175":1}}],["300",{"2":{"164":1}}],["30088",{"2":{"55":2,"181":2}}],["30",{"2":{"20":2,"82":1,"124":1,"125":1,"182":2,"183":15}}],["3examples",{"2":{"7":1}}],["3",{"2":{"6":2,"7":11,"9":1,"11":1,"12":1,"37":1,"55":3,"58":4,"67":3,"71":1,"72":3,"79":1,"95":1,"98":1,"106":1,"108":2,"114":3,"115":2,"130":4,"131":1,"154":3,"158":1,"160":1,"167":2,"168":1,"169":1,"175":2,"181":8,"182":5,"183":26}}],["+",{"2":{"6":1,"55":2,"58":2,"60":1,"63":1,"64":1,"108":2,"167":1,"175":1,"181":2,"182":1,"183":5}}],["5th",{"2":{"182":1}}],["595",{"2":{"174":1}}],["570",{"2":{"144":1}}],["57694",{"2":{"55":1,"181":1}}],["519",{"2":{"145":1,"166":1}}],["514",{"2":{"127":1}}],["512",{"2":{"55":5,"181":5,"182":1}}],["5=best",{"2":{"122":1}}],["50m",{"2":{"183":2}}],["504",{"2":{"177":1}}],["500",{"2":{"147":1,"182":2}}],["50
086",{"2":{"55":4,"181":4}}],["50",{"2":{"55":4,"92":1,"181":4}}],["52910",{"2":{"55":4,"181":4}}],["55394",{"2":{"55":1,"181":1}}],["5examples",{"2":{"7":1}}],["5",{"0":{"83":1},"2":{"6":5,"12":2,"21":1,"23":3,"28":1,"30":2,"31":1,"37":3,"40":1,"47":1,"50":1,"55":11,"58":3,"62":2,"64":11,"69":1,"71":3,"79":1,"80":1,"83":2,"89":2,"92":1,"98":1,"100":1,"104":1,"107":1,"114":2,"115":1,"121":14,"122":2,"126":1,"154":2,"158":2,"160":2,"167":1,"171":1,"175":1,"179":1,"181":11,"182":25,"183":50}}],["837",{"2":{"183":1}}],["8k",{"2":{"183":1}}],["84",{"2":{"181":1}}],["886",{"2":{"164":1}}],["82",{"2":{"55":1,"181":1}}],["87",{"2":{"30":1}}],["8755f69180b7ac7ee76a69ae68ec36872a116ad4",{"2":{"21":1,"183":2}}],["8x7b",{"2":{"28":1,"37":1,"108":1}}],["80k",{"2":{"182":3}}],["80kg",{"2":{"20":1,"183":7}}],["8080",{"2":{"28":1,"62":3,"64":3,"182":3,"183":2}}],["8081",{"2":{"24":1,"183":2}}],["80",{"2":{"20":1,"182":6,"183":4}}],["8examples",{"2":{"7":1}}],["8",{"2":{"6":1,"55":1,"181":1,"182":4}}],["64",{"2":{"182":2}}],["636",{"2":{"112":1}}],["60",{"2":{"55":3,"79":1,"82":1,"182":2,"183":6}}],["67",{"2":{"55":10,"181":11}}],["67dict",{"2":{"7":3}}],["69",{"2":{"23":1,"183":2}}],["66667dict",{"2":{"7":3}}],["6examples",{"2":{"7":1}}],["6",{"2":{"6":1,"7":1,"42":1,"55":6,"67":1,"79":1,"181":7,"182":5,"183":4}}],["420",{"2":{"122":1,"177":1}}],["429",{"0":{"80":1}}],["48",{"2":{"181":1}}],["4823e00fbf65c00479468331022bb56ae4c48eae",{"2":{"67":1,"182":1,"183":6}}],["48343",{"2":{"55":1,"181":1}}],["4k",{"2":{"67":1,"183":1}}],["46",{"2":{"94":1,"183":1}}],["46632",{"2":{"55":1,"181":1}}],["46839",{"2":{"55":2,"181":2}}],["43094",{"2":{"55":1,"181":1}}],["43",{"2":{"55":1,"181":1}}],["44816",{"2":{"55":2,"181":2}}],["41",{"2":{"55":1,"134":1,"181":1}}],["4examples",{"2":{"7":1}}],["402",{"2":{"150":1}}],["40796033843072876",{"2":{"47":1}}],["4096×2",{"2":{"23":1,"46":1,"183":1}}],["4096",{"2":{"23":1,"45":2,"46":1,"47":1,"183":2}}],["40",{"2":{"7":8,"58":1,"183":2}}],["4",{"2":{"6":3,"7":2,"9":1,"16":5,"24":1,"26":1,"55":10,"58":3,"67":1,"71":1,"72":1,"98":3,"114":1,"154":1,"158":1,"181":10,"182":14,"183":10}}],["q4",{"2":{"28":1,"37":1}}],["qaevalresult",{"2":{"6":1,"182":5}}],["qaevalitems",{"2":{"182":1}}],["qaevalitem",{"2":{"4":1,"5":1,"64":5,"182":13}}],["qa",{"2":{"4":1,"6":2,"7":5,"57":1,"64":8,"182":38,"183":3}}],["q",{"0":{"4":1,"5":1,"6":1},"2":{"3":1,"4":1,"6":1,"64":4,"182":4}}],["quantization",{"2":{"158":1,"182":6}}],["quantum",{"2":{"58":1}}],["quarter",{"2":{"64":1,"182":1}}],["quality=",{"2":{"183":1}}],["quality`",{"2":{"183":1}}],["quality",{"2":{"3":2,"5":1,"7":1,"8":1,"9":3,"14":2,"18":1,"95":1,"105":1,"107":2,"121":2,"122":2,"152":1,"157":1,"161":1,"163":1,"165":1,"170":1,"173":1,"182":2,"183":6}}],["queried",{"2":{"183":2}}],["queries",{"2":{"136":1}}],["query",{"0":{"123":1},"1":{"124":1,"125":1,"126":1,"127":1,"128":1},"2":{"50":3,"63":3,"67":7,"112":4,"117":7,"118":8,"124":7,"125":5,"126":13,"127":11,"128":8,"179":3,"182":35,"183":15}}],["question=",{"2":{"182":4}}],["question>",{"2":{"173":2}}],["questionrouter",{"0":{"138":1},"2":{"183":1}}],["questions",{"0":{"73":1},"1":{"74":1,"75":1,"76":1,"77":1,"78":1,"79":1,"80":1,"81":1,"82":1,"83":1,"84":1,"85":1,"86":1,"87":1,"88":1,"89":1,"90":1,"91":1,"92":1,"93":1,"94":1,"95":1,"96":1,"97":1,"98":1},"2":{"3":1,"4":1,"9":2,"14":1,"16":1,"24":1,"26":1,"31":1,"34":1,"37":1,"42":1,"58":1,"64":3,"67":1,"71":1,"110":1,"124":2,"157":1,"161":1,"163":1,"165":1,"170":1,"173":1,"182":8,"183":5}}],["question",{"2":{"2":5,"5":1,"6":4,"7":2,"8"
:1,"9":7,"14":1,"24":1,"26":1,"57":2,"58":8,"61":2,"62":3,"64":30,"82":1,"103":1,"104":1,"105":2,"107":2,"110":5,"112":4,"117":1,"118":1,"120":7,"121":8,"122":6,"125":1,"138":7,"152":1,"153":1,"154":4,"156":2,"157":1,"161":1,"163":1,"165":1,"170":1,"182":74,"183":6}}],["quirks",{"2":{"83":1,"93":1}}],["quicker",{"2":{"181":1}}],["quick",{"0":{"71":1},"2":{"29":1,"69":1,"78":1,"94":2,"160":1,"183":6}}],["quickly",{"0":{"94":1},"2":{"12":1,"56":1,"67":1,"152":1,"182":1,"183":1}}],["quite",{"2":{"28":1,"63":1,"79":1,"95":1,"108":1,"182":1}}],["quote",{"2":{"154":1}}],["quotes",{"2":{"130":3,"152":1,"182":1}}],["quota",{"2":{"80":1}}],["quot",{"0":{"78":4,"79":2,"80":2},"2":{"1":4,"2":2,"6":2,"7":8,"9":2,"11":8,"12":6,"14":4,"16":10,"18":8,"19":2,"22":6,"28":2,"29":2,"30":2,"31":2,"32":2,"33":2,"34":2,"37":4,"42":10,"50":6,"52":6,"54":6,"55":26,"58":2,"60":2,"64":20,"67":52,"69":6,"71":4,"72":2,"78":8,"79":4,"80":4,"82":2,"83":4,"84":4,"85":6,"87":2,"89":2,"90":10,"91":6,"92":6,"93":8,"95":16,"96":6,"97":4,"98":2,"100":2,"101":4,"102":6,"104":8,"105":6,"106":8,"107":6,"108":16,"114":2,"115":2,"120":2,"134":2,"152":2,"153":2,"154":2,"155":2,"156":2,"167":2,"169":2,"171":2,"175":2,"179":6,"181":64,"182":41,"183":292}}],["=template",{"2":{"183":2}}],["=pt",{"2":{"183":3}}],["=prompt",{"2":{"183":1}}],["=url",{"2":{"183":1}}],["=user",{"2":{"149":1}}],["=context",{"2":{"182":1}}],["=1",{"2":{"182":1}}],["=system",{"2":{"149":1,"183":1}}],["=main",{"2":{"55":1,"183":1}}],["=nothing",{"2":{"55":2,"181":4,"182":2}}],["==true",{"2":{"183":1}}],["==",{"2":{"22":2,"54":3,"55":4,"93":1,"167":4,"175":4,"181":13,"182":2,"183":4}}],["=wordcount",{"2":{"14":1,"183":2}}],["=>dict",{"2":{"108":6}}],["=>",{"2":{"6":8,"7":1,"93":3,"97":4,"107":4,"108":16,"168":1,"169":1,"182":3,"183":41}}],["=",{"2":{"1":2,"2":6,"4":4,"6":8,"7":13,"9":8,"11":1,"13":6,"14":7,"15":2,"16":3,"17":4,"18":2,"19":4,"20":2,"21":3,"22":14,"23":5,"24":7,"25":1,"26":3,"27":5,"28":1,"29":9,"30":1,"31":3,"32":1,"34":2,"35":2,"37":2,"39":2,"40":5,"41":2,"42":8,"43":1,"45":2,"46":5,"47":3,"50":4,"51":1,"54":14,"55":68,"56":1,"58":8,"60":2,"62":28,"64":146,"67":28,"69":2,"71":1,"72":1,"83":3,"84":2,"85":1,"87":1,"90":2,"91":1,"92":2,"93":11,"95":5,"97":9,"98":6,"102":1,"105":1,"106":3,"107":10,"108":20,"166":1,"167":1,"168":2,"169":1,"174":1,"175":1,"179":7,"181":134,"182":423,"183":667}}],["jxnl",{"2":{"152":1,"153":1}}],["javascript",{"2":{"163":2}}],["javascriptexpertask",{"0":{"163":1}}],["jargon",{"2":{"126":1}}],["jarvislabs",{"2":{"96":1}}],["jack",{"2":{"20":1,"94":4,"183":9}}],["james",{"2":{"20":1,"183":9}}],["jane",{"2":{"7":4,"114":2}}],["jedi",{"2":{"13":3,"35":1,"183":1}}],["joy",{"2":{"41":1}}],["job",{"2":{"7":4,"82":1,"183":5}}],["job=",{"2":{"7":1}}],["jobs",{"2":{"7":7}}],["john",{"2":{"7":3,"40":2,"92":6,"97":2}}],["joint",{"2":{"182":1}}],["join",{"2":{"6":3,"7":23,"108":2,"183":3}}],["joining",{"2":{"5":2,"6":2,"7":2,"75":1}}],["joins",{"2":{"2":1,"5":3,"6":1,"7":15,"182":1}}],["joinpath",{"2":{"2":2,"9":1,"183":3}}],["jsonl",{"2":{"96":2,"183":1}}],["json",{"2":{"4":2,"9":2,"94":1,"96":1,"98":1,"107":1,"108":14,"183":40}}],["json3",{"2":{"1":1,"4":2,"23":1,"45":1,"108":7,"183":3}}],["jump",{"2":{"181":1,"183":1}}],["judgment",{"2":{"160":1}}],["judging",{"2":{"121":1,"182":1}}],["judge=",{"2":{"182":1}}],["judgerating",{"2":{"182":2,"183":1}}],["judgeisittrue",{"0":{"137":1},"2":{"14":1,"18":2,"183":7}}],["judgeallscores",{"2":{"6":1,"182":2,"183":1}}],["judged",{"2":{"6":2}}],["judge",{"2":{"2":1,"6":3,"7":2,"11":1,"18":1
,"55":1,"106":1,"121":4,"122":3,"137":1,"181":1,"182":8,"183":3}}],["juicy",{"2":{"31":2,"108":8}}],["just",{"2":{"2":1,"9":4,"11":1,"14":1,"24":1,"26":1,"31":1,"50":1,"55":1,"78":1,"80":1,"87":1,"89":2,"92":1,"93":1,"105":1,"106":1,"108":4,"110":1,"117":1,"118":1,"138":1,"158":2,"179":1,"181":1,"183":13}}],["juliais",{"2":{"183":1}}],["juliainitialize",{"2":{"183":1}}],["juliaindex",{"2":{"58":1,"64":2,"182":6}}],["juliaweather",{"2":{"183":1}}],["juliawrap",{"2":{"67":2,"183":7}}],["juliaqa",{"2":{"182":1}}],["juliaquestion",{"2":{"58":1}}],["juliakwargs",{"2":{"182":1}}],["juliakw",{"2":{"182":3}}],["juliakeywordsprocessor",{"2":{"182":1}}],["juliakeywordsindexer",{"2":{"182":1}}],["julianotagger",{"2":{"182":1}}],["julianotagfilter",{"2":{"182":1}}],["julianoreranker",{"2":{"182":1}}],["julianorephraser",{"2":{"182":1}}],["julianorefiner",{"2":{"182":1}}],["julianoprocessor",{"2":{"182":1}}],["julianopostprocessor",{"2":{"182":1}}],["julianoembedder",{"2":{"182":1}}],["julianew",{"2":{"13":1,"183":1}}],["juliahandle",{"2":{"183":1}}],["juliahamming",{"2":{"182":1}}],["juliahcat",{"2":{"182":1}}],["juliahyderephraser",{"2":{"182":1}}],["juliahtmlstyler",{"2":{"182":1}}],["juliagroqopenaischema",{"2":{"183":1}}],["juliagetpropertynested",{"2":{"182":1}}],["juliaget",{"2":{"182":7,"183":1}}],["juliagenerate",{"2":{"64":1,"182":1,"183":1}}],["juliagamma",{"2":{"181":1}}],["juliabin",{"2":{"182":2}}],["juliabinary",{"2":{"182":1}}],["juliabinarycosinesimilarity",{"2":{"182":1}}],["juliabinarybatchembedder",{"2":{"182":1}}],["juliabitpacked",{"2":{"182":1}}],["juliabitpackedcosinesimilarity",{"2":{"182":1}}],["juliabitpackedbatchembedder",{"2":{"182":1}}],["juliabatchembedder",{"2":{"182":1}}],["juliabm25similarity",{"2":{"182":1}}],["juliabeta",{"2":{"181":1}}],["juliablogwriter",{"0":{"164":1}}],["juliabuild",{"2":{"64":3,"182":5,"183":3}}],["juliaollama",{"2":{"183":1}}],["juliaopenai",{"2":{"183":3}}],["juliaopenrouteropenaischema",{"2":{"183":1}}],["juliaopentagger",{"2":{"182":1}}],["juliaobj",{"2":{"108":1}}],["juliaoutput",{"2":{"54":1}}],["juliaout",{"2":{"22":1,"55":2,"181":2}}],["juliaupdate",{"2":{"183":1}}],["juliaunique",{"2":{"183":1}}],["juliaunwrap",{"2":{"98":1}}],["juliausermessagewithimages",{"2":{"183":1}}],["juliausermessage",{"2":{"183":1}}],["juliausing",{"2":{"1":1,"9":1,"11":1,"13":1,"14":1,"17":1,"21":1,"22":1,"25":1,"32":2,"37":1,"47":1,"48":1,"51":1,"66":1,"70":1,"93":1,"95":1,"98":3,"106":1,"107":2,"108":1,"182":1,"183":10}}],["juliauct",{"2":{"181":1}}],["julialocalserveropenaischema",{"2":{"183":1}}],["juliaload",{"2":{"182":1,"183":2}}],["juliallmleaderboard",{"2":{"96":1}}],["julialength",{"2":{"67":1,"183":1}}],["julialanguage",{"2":{"114":1}}],["julialang",{"2":{"57":1,"66":1,"67":1,"183":1}}],["juliaa=1",{"2":{"183":1}}],["juliaaai",{"2":{"183":1}}],["juliaauth",{"2":{"183":1}}],["juliaapi",{"2":{"183":2}}],["juliaa",{"2":{"182":1,"183":2}}],["juliaalternative",{"2":{"183":1}}],["juliaalign",{"2":{"182":1}}],["juliaalltagfilter",{"2":{"182":1}}],["juliaanthropic",{"2":{"183":2}}],["juliaanthropicschema",{"2":{"183":1}}],["juliaanswer",{"2":{"182":1}}],["juliaanytagfilter",{"2":{"182":1}}],["juliaannotatednode",{"2":{"182":1}}],["juliaannotater",{"2":{"64":1,"182":1}}],["juliaannotate",{"2":{"64":2,"182":2}}],["juliaadvancedretriever",{"2":{"182":1}}],["juliaadvancedgenerator",{"2":{"182":1}}],["juliaadd",{"2":{"181":1,"182":1}}],["juliaabstracttool",{"2":{"183":1}}],["juliaabstractretriever",{"2":{"182":1}}],["juliaabstractmultiindex",{"2":{"182":1}}],["ju
liaabstractindexbuilder",{"2":{"182":1}}],["juliaabstractgenerator",{"2":{"182":1}}],["juliaabstractchunkindex",{"2":{"182":1}}],["juliaabstractcandidatechunks",{"2":{"182":1}}],["juliaagenttools",{"2":{"181":1}}],["juliaassume",{"2":{"64":1,"182":1}}],["juliaaitools",{"2":{"183":3}}],["juliaaitemplate",{"2":{"183":1}}],["juliaaitemplates",{"2":{"94":1,"183":2}}],["juliaaiimage",{"2":{"183":2}}],["juliaaimessage",{"2":{"183":1}}],["juliaaiscan",{"2":{"181":1,"183":3}}],["juliaaiextract",{"2":{"181":1,"183":3}}],["juliaaiembed",{"2":{"30":1,"31":1,"181":1,"183":3}}],["juliaairag",{"2":{"64":1,"182":1}}],["juliaairetry",{"2":{"55":1,"181":1}}],["juliaaicodefixer",{"2":{"55":1,"181":2}}],["juliaaicode",{"2":{"55":1,"183":1}}],["juliaaicall",{"2":{"55":3,"181":6}}],["juliaaiclassify",{"2":{"18":2,"181":1,"183":5}}],["juliaaigenerate",{"2":{"55":1,"94":2,"181":1,"183":8}}],["juliaai",{"2":{"34":1,"71":2,"92":1,"183":1}}],["juliafinalize",{"2":{"183":3}}],["juliafind",{"2":{"182":8,"183":2}}],["juliafields",{"2":{"183":4}}],["juliafireworksopenaischema",{"2":{"183":1}}],["juliafilechunker",{"2":{"182":1}}],["juliafilename",{"2":{"9":1}}],["juliafiles",{"2":{"2":1}}],["juliaflashranker",{"2":{"182":1}}],["juliafeedback",{"2":{"108":1}}],["juliafor",{"2":{"55":1,"181":1}}],["juliart",{"2":{"182":1}}],["juliarank",{"2":{"182":2}}],["juliarankgptresult",{"2":{"182":1}}],["juliarankgptreranker",{"2":{"182":1}}],["juliaragresult",{"2":{"182":1}}],["juliaragconfig",{"2":{"182":1}}],["juliaragtools",{"2":{"182":1}}],["juliarun",{"2":{"181":2,"182":2}}],["juliarender",{"2":{"183":9}}],["juliarendered",{"2":{"107":1}}],["juliaremove",{"2":{"183":1}}],["juliaregister",{"2":{"183":2}}],["juliarerank",{"2":{"182":2}}],["juliarefiner",{"2":{"182":1}}],["juliarefine",{"2":{"182":3}}],["juliaretryconfig",{"2":{"181":1}}],["juliaretrieve",{"2":{"64":1,"182":1}}],["juliaretriever",{"2":{"62":1,"64":2,"182":2}}],["juliareciprocal",{"2":{"182":2}}],["juliareceive",{"2":{"182":1}}],["juliarecaptask",{"0":{"169":1}}],["juliarecapcottask",{"0":{"168":1}}],["juliarecursive",{"2":{"67":2,"183":2}}],["juliareplace",{"2":{"67":1,"183":1}}],["juliarephrase",{"2":{"62":1,"182":3}}],["juliaresponse",{"2":{"183":1}}],["juliaresize",{"2":{"183":2}}],["juliares",{"2":{"64":1,"182":1}}],["juliaresult",{"2":{"58":1,"106":1,"108":1,"182":1,"183":4}}],["juliaresults",{"2":{"7":1}}],["juliar",{"2":{"50":2,"179":2}}],["juliasharegptschema",{"2":{"183":1}}],["juliasave",{"2":{"183":3}}],["juliasaverschema",{"2":{"183":1}}],["juliasample",{"2":{"181":1}}],["juliasamplenode",{"2":{"181":1}}],["juliaspec",{"2":{"183":1}}],["juliasplit",{"2":{"182":1}}],["juliascore",{"2":{"182":1}}],["juliaschema",{"2":{"42":1}}],["juliastreamed",{"2":{"183":1}}],["juliastreamchunk",{"2":{"183":1}}],["juliastreamcallback",{"2":{"183":1}}],["juliastemmer",{"2":{"182":1}}],["juliastyler",{"2":{"182":1}}],["juliasubchunkindex",{"2":{"182":1}}],["juliasimpleretriever",{"2":{"182":1}}],["juliasimplerephraser",{"2":{"182":1}}],["juliasimplerefiner",{"2":{"182":1}}],["juliasimpleindexer",{"2":{"182":1}}],["juliasimplegenerator",{"2":{"182":1}}],["juliasimplebm25retriever",{"2":{"182":1}}],["juliasimpleanswerer",{"2":{"182":1}}],["juliasig",{"2":{"108":3}}],["juliasetpropertynested",{"2":{"182":1}}],["juliaset",{"2":{"182":1,"183":2}}],["juliaselect",{"2":{"181":1}}],["juliasentences",{"2":{"58":1}}],["juliaserialize",{"2":{"2":1}}],["juliamarkdown",{"2":{"183":1}}],["juliamistralopenaischema",{"2":{"183":1}}],["juliamessages",{"2":{"183":1}}],["juliamerge",{"2":
{"182":1}}],["juliameta",{"2":{"98":1}}],["juliamultiindex",{"2":{"182":1}}],["juliamultifinder",{"2":{"182":1}}],["juliamulticandidatechunks",{"2":{"182":1}}],["juliamodelspec",{"2":{"183":1}}],["juliamodel",{"2":{"40":1,"108":2,"183":2}}],["juliamsgs",{"2":{"9":1}}],["juliamsg",{"2":{"9":1,"13":1,"14":2,"21":1,"23":1,"24":1,"26":1,"28":1,"29":1,"30":1,"31":1,"34":1,"39":1,"43":1,"45":2,"46":1,"47":1,"55":1,"64":1,"72":1,"182":3,"183":14}}],["juliacallback",{"2":{"183":1}}],["juliacall",{"2":{"183":1}}],["juliacandidatechunks",{"2":{"182":1}}],["juliacustomopenaischema",{"2":{"183":1}}],["juliacerebrasopenaischema",{"2":{"183":1}}],["juliacc",{"2":{"182":1}}],["juliachunkkeywordsindex",{"2":{"182":2}}],["juliachunkembeddingsindex",{"2":{"182":1}}],["juliachoices",{"2":{"19":1,"93":1,"183":5}}],["juliacfg",{"2":{"62":1,"64":1,"182":3}}],["juliacb",{"2":{"55":1,"181":1}}],["juliacohere",{"2":{"182":1}}],["juliacoherereranker",{"2":{"182":1}}],["juliacosinesimilarity",{"2":{"182":1}}],["juliacountry",{"2":{"71":1}}],["juliacommands",{"2":{"67":1,"183":1}}],["juliacode",{"2":{"55":2,"183":4}}],["juliaconfigure",{"2":{"183":1}}],["juliaconv",{"2":{"183":4}}],["juliaconversation",{"2":{"35":1,"41":1,"92":1}}],["juliacontextenumerator",{"2":{"182":1}}],["juliacontext",{"2":{"67":1,"183":1}}],["juliaconst",{"2":{"16":1,"23":1,"24":1,"26":1,"183":15}}],["juliacreate",{"2":{"50":1,"179":1,"182":1,"183":1}}],["juliapush",{"2":{"183":1}}],["juliapositions1",{"2":{"182":2}}],["juliapermutation",{"2":{"182":1}}],["juliaparse",{"2":{"183":1}}],["juliaparent",{"2":{"182":1}}],["juliapack",{"2":{"182":1}}],["juliapackage",{"2":{"114":1}}],["juliapassthroughtagger",{"2":{"182":1}}],["juliapprint",{"2":{"58":1,"183":2}}],["juliapreferences",{"2":{"183":1}}],["juliapreprocess",{"2":{"182":1}}],["juliaprompt",{"2":{"108":1}}],["juliapromptingtools",{"2":{"60":1,"182":1,"183":5}}],["juliaprompts",{"2":{"15":1}}],["juliaprint",{"2":{"55":1,"181":1,"182":1,"183":3}}],["juliapt",{"2":{"9":2,"42":1,"55":1,"64":1,"94":2,"97":1,"98":1,"182":3,"183":5}}],["juliajulia>",{"2":{"14":1,"55":1,"181":1,"183":1}}],["juliadistance",{"2":{"183":1}}],["juliadetect",{"2":{"183":1}}],["juliadecode",{"2":{"183":1}}],["juliadeepseekopenaischema",{"2":{"183":1}}],["juliadatabricksopenaischema",{"2":{"183":1}}],["juliadatamessage",{"2":{"183":1}}],["juliadataexpertask",{"2":{"9":2}}],["juliadry",{"2":{"97":1}}],["juliadocumenttermmatrix",{"2":{"182":1}}],["juliadoc",{"2":{"64":1,"182":1}}],["juliadocs",{"2":{"46":1}}],["juliadf",{"2":{"7":1}}],["juliatypeof",{"2":{"183":5}}],["juliatool",{"2":{"183":6}}],["juliatogetheropenaischema",{"2":{"183":1}}],["juliatokenize",{"2":{"182":1}}],["juliatoken",{"2":{"182":1}}],["juliatags",{"2":{"182":1}}],["juliatavilysearchrefiner",{"2":{"182":1}}],["juliatavily",{"2":{"179":1}}],["juliatryparse",{"2":{"183":1}}],["juliatracerschema",{"2":{"183":1}}],["juliatracermessagelike",{"2":{"183":1}}],["juliatracermessage",{"2":{"183":1}}],["juliatranslate",{"2":{"182":2}}],["juliatrigrams",{"2":{"182":1}}],["juliatrigram",{"2":{"182":1}}],["juliatrigramannotater",{"2":{"182":1}}],["juliatruncate",{"2":{"181":1}}],["juliathompsonsampling",{"2":{"181":1}}],["juliatemplate",{"2":{"107":1}}],["juliatextchunker",{"2":{"182":1}}],["juliatext1",{"2":{"67":1,"183":1}}],["juliatext",{"2":{"17":1,"67":7,"183":7}}],["juliatpl",{"2":{"9":1}}],["juliatmps",{"2":{"9":1,"14":2,"183":4}}],["juliaencode",{"2":{"183":1}}],["juliaenv",{"2":{"84":1}}],["juliaexecute",{"2":{"183":1}}],["juliaextract",{"2":{"182":1,"183":8}}
],["juliaexperimental",{"2":{"180":1}}],["juliaexperttask",{"2":{"183":1}}],["juliaexperttestcodexml",{"0":{"175":1}}],["juliaexperttestcode",{"0":{"167":1}}],["juliaexperttranscriptcritic",{"0":{"142":1}}],["juliaexpertcottaskxml",{"0":{"174":1}}],["juliaexpertcottask",{"0":{"166":1}}],["juliaexpertaskxml",{"0":{"173":1}}],["juliaexpertask",{"0":{"165":1},"2":{"9":6,"14":4,"55":1,"106":2,"181":1,"183":6}}],["juliaeval",{"2":{"183":1}}],["juliaevaluate",{"2":{"181":1}}],["juliaevals",{"2":{"4":1,"5":1}}],["juliaerror",{"2":{"55":1,"181":1}}],["juliax",{"2":{"6":1,"182":1,"183":1}}],["julia>",{"2":{"5":1}}],["julia",{"2":{"2":3,"4":1,"6":1,"7":1,"9":14,"11":1,"14":6,"20":2,"21":3,"22":2,"24":1,"27":1,"29":1,"31":1,"43":1,"52":1,"54":2,"55":9,"56":1,"57":1,"58":26,"64":3,"67":1,"69":3,"78":6,"79":1,"83":2,"84":3,"87":1,"93":2,"94":1,"97":1,"104":1,"105":1,"106":1,"107":2,"108":2,"114":2,"124":4,"130":4,"131":2,"132":1,"142":8,"164":3,"165":2,"166":2,"167":3,"168":7,"169":8,"173":2,"174":2,"175":3,"181":12,"182":17,"183":72}}],["jls",{"2":{"2":2}}],["jl",{"0":{"85":1},"2":{"0":3,"1":1,"2":3,"8":2,"9":1,"11":1,"24":3,"26":2,"27":1,"29":1,"30":1,"31":1,"47":1,"55":1,"58":3,"63":1,"64":1,"65":1,"67":3,"69":2,"70":1,"75":1,"84":2,"87":2,"88":1,"91":3,"96":1,"99":1,"102":1,"106":1,"107":1,"108":1,"114":4,"181":1,"182":5,"183":20}}],["22",{"2":{"181":1}}],["2277",{"2":{"140":1}}],["26078",{"2":{"181":3}}],["267",{"2":{"128":1}}],["29",{"2":{"183":2}}],["29826",{"2":{"181":3}}],["2900",{"2":{"67":2,"183":2}}],["21",{"2":{"183":1}}],["2190",{"2":{"153":1}}],["210",{"2":{"132":1}}],["278",{"2":{"115":1}}],["2733",{"2":{"55":4,"181":4}}],["2500",{"2":{"183":7}}],["256",{"2":{"182":2}}],["25px",{"2":{"67":1,"182":1,"183":6}}],["25",{"2":{"64":3,"182":4}}],["248",{"2":{"173":1}}],["2487",{"2":{"130":1}}],["24",{"2":{"36":1}}],["24622",{"2":{"21":1,"183":2}}],["239",{"2":{"178":1}}],["23",{"2":{"55":1,"181":1,"183":1}}],["23rd",{"2":{"31":1}}],["237",{"2":{"9":1,"14":1,"165":1,"183":2}}],["2s",{"2":{"22":1,"54":1,"55":2,"181":2}}],["2064",{"2":{"142":1}}],["2000",{"2":{"167":1,"175":1}}],["200",{"2":{"82":1,"183":6}}],["20506",{"2":{"55":1,"181":1}}],["20737",{"2":{"55":4,"181":4}}],["2049",{"2":{"152":1}}],["20493",{"2":{"55":2,"181":2}}],["2048",{"2":{"28":1,"183":5}}],["2021",{"2":{"114":2}}],["2020",{"2":{"72":1}}],["20240307",{"2":{"183":1}}],["2024",{"2":{"31":1,"83":1,"183":1}}],["2023",{"2":{"16":1,"58":1,"183":12}}],["20",{"2":{"7":6,"19":1,"67":2,"124":1,"125":1,"182":3,"183":8}}],["2examples",{"2":{"7":1}}],["2",{"0":{"156":1},"1":{"157":1,"158":1,"159":1,"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"171":1,"172":1,"173":1,"174":1,"175":1},"2":{"0":1,"7":3,"9":3,"12":1,"14":1,"20":1,"21":3,"22":5,"29":3,"46":1,"47":2,"50":1,"54":5,"55":19,"58":1,"64":2,"67":7,"78":1,"79":1,"82":1,"93":1,"94":1,"95":2,"108":2,"112":1,"114":2,"115":1,"120":1,"121":1,"130":3,"131":1,"136":1,"138":1,"154":1,"158":1,"160":2,"164":1,"166":1,"167":2,"168":3,"169":3,"175":2,"179":1,"181":33,"182":14,"183":34}}],["1`",{"2":{"183":1}}],["1+1",{"2":{"183":1}}],["16",{"2":{"183":2}}],["1643",{"2":{"175":1}}],["16k",{"2":{"67":1,"181":1,"183":1}}],["17",{"2":{"183":2}}],["175b",{"2":{"183":3}}],["1712",{"2":{"171":1}}],["172",{"2":{"159":1}}],["1>",{"2":{"152":1}}],["184",{"2":{"157":1}}],["18",{"2":{"149":1,"181":1,"183":1}}],["180",{"2":{"20":1,"183":4}}],["180cm",{"2":{"20":1,"183":7}}],["150",{"2":{"160":1}}],["1501",{"2":{"160":1}}],["1506",{"2":{"154":1}}],["1515",{"2":{"141":
1}}],["151",{"2":{"137":1}}],["1536×2",{"2":{"183":1}}],["1536",{"2":{"17":1,"183":1}}],["1=worst",{"2":{"122":1}}],["13184",{"2":{"181":2}}],["1396",{"2":{"120":1}}],["1392",{"2":{"118":1}}],["1384",{"2":{"114":1}}],["1m",{"2":{"72":1}}],["1em",{"2":{"67":1,"182":1,"183":6}}],["1examples",{"2":{"7":1}}],["1px",{"2":{"67":1,"182":1,"183":6}}],["1475",{"2":{"167":1}}],["1415",{"2":{"121":1}}],["14966",{"2":{"55":4,"181":4}}],["14",{"2":{"55":1,"181":1}}],["111",{"2":{"183":2}}],["11",{"2":{"182":1}}],["11434",{"2":{"89":1,"183":2}}],["1143",{"2":{"67":1,"168":1,"169":1,"183":1}}],["114",{"2":{"24":1,"26":1}}],["1141",{"2":{"21":1,"183":2}}],["1106",{"2":{"16":2}}],["1928",{"2":{"120":3}}],["190",{"2":{"20":2,"183":6}}],["19",{"2":{"20":2,"58":1,"183":6}}],["124",{"2":{"183":1}}],["128",{"2":{"182":2}}],["127",{"2":{"89":1,"183":5}}],["12940",{"2":{"55":1,"181":1}}],["12",{"2":{"55":2,"79":1,"181":2,"182":1,"183":2}}],["120",{"2":{"11":2,"79":1,"106":2,"183":12}}],["123",{"2":{"9":1,"90":1}}],["10897",{"2":{"183":5}}],["10`",{"2":{"168":1,"169":1}}],["1073",{"2":{"126":1}}],["1074",{"2":{"117":1}}],["10examples",{"2":{"7":1}}],["10×8",{"2":{"7":1}}],["100k",{"2":{"182":1}}],["1000",{"2":{"82":1,"93":1,"167":1,"175":1,"177":1,"182":3,"183":4}}],["100x",{"2":{"79":1}}],["100",{"2":{"7":3,"55":4,"62":2,"64":5,"160":1,"181":3,"182":15,"183":17}}],["10",{"2":{"6":1,"7":4,"12":1,"55":8,"58":1,"64":1,"67":1,"78":2,"79":2,"108":1,"126":1,"181":6,"182":11,"183":18}}],["1024x1024",{"2":{"183":2}}],["1024",{"2":{"183":2}}],["102",{"2":{"4":1,"12":1}}],["1",{"0":{"155":1},"2":{"0":1,"5":1,"6":1,"7":13,"9":4,"11":2,"14":3,"17":2,"19":1,"22":6,"28":1,"30":1,"32":1,"33":1,"37":2,"40":1,"45":1,"47":1,"54":6,"55":48,"58":6,"64":9,"66":2,"67":9,"71":1,"72":2,"78":1,"82":1,"89":1,"92":1,"94":4,"95":2,"98":1,"106":3,"107":1,"108":3,"110":1,"112":2,"114":4,"115":1,"117":2,"118":2,"120":5,"121":17,"122":3,"124":1,"125":1,"126":1,"127":1,"128":1,"130":3,"131":3,"132":1,"134":1,"136":3,"137":2,"138":2,"140":1,"141":1,"142":1,"144":1,"145":1,"147":2,"149":2,"150":1,"152":5,"153":4,"154":3,"157":1,"158":2,"159":2,"160":2,"161":1,"162":1,"163":1,"164":1,"165":1,"167":3,"168":6,"169":5,"170":1,"171":1,"173":1,"174":1,"175":2,"177":1,"178":1,"181":63,"182":47,"183":89}}],["nfeedback",{"2":{"181":6}}],["n```",{"2":{"130":1,"131":1,"183":1}}],["nwhat",{"2":{"107":1}}],["nwe",{"2":{"7":1}}],["nparagraph",{"2":{"67":6,"95":2,"183":6}}],["n=5",{"2":{"64":1,"182":1}}],["n=2",{"2":{"22":1,"54":1,"55":1,"181":1}}],["nsfw",{"2":{"55":1,"181":1}}],["nsemijoin",{"2":{"7":1}}],["nbsp",{"2":{"50":1,"55":9,"64":6,"67":5,"179":2,"180":1,"181":37,"182":142,"183":184}}],["ngl",{"2":{"28":2}}],["nli",{"2":{"18":1}}],["nt2",{"2":{"182":3}}],["nt1",{"2":{"182":3}}],["nt",{"2":{"182":4}}],["nthreads",{"2":{"182":6}}],["nthe",{"2":{"7":1}}],["ntasks=2",{"2":{"79":2}}],["ntasks=1",{"2":{"64":1,"79":1,"182":4}}],["ntasks=10",{"2":{"15":1}}],["ntasks",{"2":{"64":1,"79":1,"182":7}}],["n7",{"2":{"7":1}}],["n6",{"2":{"7":1}}],["n5",{"2":{"7":1}}],["numerical",{"2":{"58":1}}],["num",{"2":{"37":2,"112":4,"181":6,"182":2}}],["number",{"2":{"15":1,"28":1,"50":1,"55":14,"58":1,"64":8,"66":1,"67":2,"79":4,"93":10,"108":1,"112":1,"138":1,"167":1,"175":1,"179":1,"181":26,"182":19,"183":25}}],["numbers",{"2":{"2":1,"55":4,"167":2,"175":2,"181":1,"182":1,"183":3}}],["null",{"2":{"7":1,"183":3}}],["n4",{"2":{"7":1}}],["n3",{"2":{"7":1}}],["n2",{"2":{"7":1}}],["n2×3",{"2":{"7":1}}],["n2×2",{"2":{"7":2}}],["n1",{"2":{"7":1}}],["njob",{"2":{"7":2}}],["njulia",{"
2":{"7":1}}],["niche",{"2":{"126":1}}],["nice",{"2":{"9":1,"14":1,"39":1,"40":1,"42":1,"183":4}}],["nid",{"2":{"7":2}}],["nintroduction",{"2":{"7":1}}],["n─────┼───────────────",{"2":{"7":1}}],["n─────┼─────────────────────────",{"2":{"7":1}}],["n─────┼─────────────────",{"2":{"7":1}}],["naming",{"2":{"183":1}}],["name`",{"2":{"183":1}}],["named",{"2":{"167":1,"175":1,"183":19}}],["namedtuple=namedtuple",{"2":{"182":1}}],["namedtuples",{"2":{"168":1,"169":1,"182":1}}],["namedtuple",{"2":{"11":2,"55":7,"64":61,"106":2,"179":2,"181":9,"182":79,"183":84}}],["namespace",{"2":{"70":1}}],["names",{"2":{"7":3,"27":1,"28":1,"58":1,"60":1,"62":1,"67":1,"108":2,"114":5,"164":1,"168":1,"169":1,"182":1,"183":27}}],["name",{"2":{"7":6,"9":6,"12":1,"13":2,"14":2,"16":1,"28":1,"29":1,"30":1,"31":2,"37":1,"40":3,"42":1,"89":1,"90":1,"91":1,"92":5,"94":7,"96":1,"97":3,"105":2,"108":9,"114":1,"130":2,"158":1,"160":1,"167":1,"175":1,"181":1,"182":1,"183":118}}],["name=",{"2":{"7":1,"13":1,"90":1,"91":1,"94":2,"97":1,"98":1,"168":1,"169":1,"183":4}}],["narrative",{"2":{"171":2}}],["nature",{"2":{"153":1,"183":1}}],["naturally",{"2":{"183":1}}],["natural",{"2":{"58":1,"126":1,"128":1,"183":1}}],["native",{"2":{"83":1,"108":1}}],["navigate",{"2":{"1":1}}],["n",{"2":{"7":17,"9":8,"14":2,"22":2,"54":2,"55":7,"62":2,"64":8,"67":24,"95":10,"105":2,"107":4,"108":4,"130":1,"131":1,"181":18,"182":18,"183":30}}],["nedeed",{"2":{"183":1}}],["neighboring",{"2":{"182":1}}],["network",{"2":{"182":3}}],["never",{"2":{"182":1,"183":1}}],["negative",{"2":{"167":1,"171":1,"175":1,"182":1}}],["nesting",{"2":{"167":1,"175":1}}],["nested",{"2":{"0":1,"62":2,"64":5,"167":1,"175":1,"182":16,"183":3}}],["neuroplasticity",{"2":{"114":2}}],["nexample",{"2":{"108":1}}],["next",{"0":{"8":1},"2":{"22":1,"54":1,"55":1,"92":1,"107":2,"153":14,"160":1,"181":2,"182":3,"183":2}}],["nearest",{"2":{"94":2,"183":2}}],["near",{"2":{"56":1}}],["necessary>",{"2":{"160":1}}],["necessary",{"2":{"11":1,"24":1,"27":1,"41":1,"52":1,"55":2,"56":1,"63":1,"91":1,"97":1,"98":1,"99":1,"106":1,"142":1,"152":1,"181":1,"183":18}}],["news",{"2":{"183":2}}],["newline",{"2":{"67":3,"182":2,"183":4}}],["newlines",{"2":{"66":1,"67":2,"95":1,"183":3}}],["new",{"0":{"83":1},"2":{"9":2,"13":2,"31":1,"55":8,"60":3,"64":2,"69":1,"71":1,"77":1,"78":1,"79":1,"83":3,"89":1,"92":1,"105":1,"107":1,"117":2,"118":1,"130":1,"131":1,"152":1,"153":1,"168":1,"169":1,"181":8,"182":5,"183":32}}],["needing",{"2":{"120":1}}],["needs",{"2":{"55":1,"102":1,"108":1,"130":1,"142":1,"181":1,"183":1}}],["needed>",{"2":{"160":1}}],["needed",{"2":{"11":1,"20":1,"22":1,"52":1,"54":1,"64":1,"93":1,"106":1,"117":2,"118":2,"140":1,"142":1,"171":1,"181":2,"182":1,"183":5}}],["need",{"2":{"3":1,"4":2,"5":1,"7":1,"11":2,"12":2,"28":1,"32":1,"37":1,"39":1,"42":1,"49":1,"54":1,"55":1,"57":1,"58":3,"62":1,"64":1,"66":1,"67":1,"69":2,"79":1,"80":1,"82":1,"84":1,"90":1,"91":2,"93":2,"94":1,"103":2,"106":2,"108":4,"130":1,"131":1,"132":1,"152":1,"153":1,"154":1,"166":1,"167":1,"169":1,"171":1,"174":1,"175":1,"181":1,"182":3,"183":28}}],["noprocessor",{"2":{"182":4,"183":1}}],["nopostprocessor",{"2":{"64":2,"182":6,"183":1}}],["noembedder",{"2":{"182":3,"183":1}}],["noisy",{"2":{"118":1}}],["noise",{"2":{"2":1}}],["noschema",{"2":{"97":3,"107":1,"183":3}}],["noreranker",{"2":{"182":4,"183":1}}],["norephraser",{"2":{"182":5,"183":1}}],["norefiner",{"2":{"64":3,"182":7,"183":1}}],["normal",{"2":{"83":1,"183":9}}],["normalization",{"2":{"47":1}}],["normalizes",{"2":{"182":1}}],["normalized",{"2":{"17":1,"66":1,
"67":1,"182":1,"183":3}}],["normalize",{"2":{"17":2,"47":2,"67":2,"182":3,"183":8}}],["norm",{"2":{"67":2,"183":2}}],["nodes",{"2":{"55":1,"64":5,"181":4,"182":15}}],["node",{"2":{"55":5,"64":3,"181":31,"182":38,"183":4}}],["nods",{"2":{"41":2}}],["nomic",{"2":{"31":2}}],["non",{"2":{"12":1,"22":1,"54":1,"55":1,"90":1,"181":1,"182":2,"183":15}}],["none",{"2":{"4":1,"22":1,"54":1,"64":2,"114":1,"115":1,"120":1,"138":2,"152":1,"153":1,"154":1,"167":1,"169":1,"171":1,"174":1,"175":1,"182":3}}],["no",{"2":{"11":1,"16":1,"21":1,"22":1,"54":1,"55":5,"58":3,"64":3,"83":2,"106":1,"107":2,"130":1,"132":1,"138":2,"153":1,"160":1,"181":6,"182":20,"183":45}}],["now",{"2":{"9":1,"24":1,"26":1,"32":1,"41":1,"55":2,"58":1,"82":1,"87":1,"93":1,"94":1,"96":1,"108":1,"181":3,"182":1,"183":8}}],["notfound",{"2":{"181":1}}],["notagfilter",{"2":{"182":6,"183":1}}],["notagger",{"2":{"64":2,"182":10,"183":1}}],["notation",{"2":{"55":1,"181":1}}],["notification",{"2":{"81":1}}],["notion",{"2":{"52":1}}],["notice",{"2":{"22":3,"23":1,"34":1,"54":2,"55":2,"58":1,"60":1,"62":2,"71":1,"92":2,"93":1,"96":1,"105":1,"107":1,"108":1,"181":2,"182":1,"183":6}}],["nothing",{"2":{"6":1,"7":1,"13":1,"20":4,"31":1,"55":19,"64":9,"78":1,"93":1,"97":1,"108":4,"117":2,"118":2,"152":1,"158":1,"181":24,"182":55,"183":218}}],["not",{"0":{"83":1},"2":{"1":1,"5":1,"7":3,"9":2,"11":3,"12":1,"13":2,"20":1,"23":1,"24":1,"27":1,"32":1,"35":2,"36":2,"41":1,"42":2,"43":1,"50":1,"52":1,"55":21,"60":1,"64":1,"67":5,"69":3,"74":1,"76":2,"77":2,"78":2,"80":2,"81":1,"83":2,"84":1,"89":1,"92":1,"93":4,"95":1,"99":1,"106":3,"108":9,"112":1,"117":1,"118":4,"120":1,"121":3,"126":1,"128":1,"130":2,"141":1,"152":5,"153":1,"154":1,"166":1,"167":1,"168":2,"169":2,"171":2,"175":1,"177":1,"179":1,"180":2,"181":23,"182":11,"183":98}}],["notes",{"2":{"55":2,"64":4,"67":2,"152":4,"153":1,"162":6,"164":7,"181":5,"182":8,"183":7}}],["notexist",{"2":{"22":1,"54":1,"55":2,"181":2}}],["noteworthy",{"2":{"11":1,"64":1,"182":1}}],["note",{"2":{"0":2,"1":2,"6":1,"7":4,"9":1,"20":1,"22":1,"24":1,"27":1,"30":1,"31":1,"37":1,"42":2,"54":1,"55":3,"63":1,"64":1,"74":1,"80":1,"83":1,"94":1,"105":1,"107":1,"108":1,"160":1,"180":1,"181":5,"182":3,"183":38}}],["pwd",{"2":{"183":2}}],["pct",{"2":{"171":3}}],["photos",{"2":{"178":1}}],["phrase",{"2":{"158":1}}],["phrasings",{"2":{"126":1}}],["phase",{"2":{"63":3,"64":1,"182":1}}],["phases",{"2":{"60":1}}],["python",{"2":{"43":1,"58":1,"168":1,"169":1,"183":2}}],["png",{"2":{"21":2,"43":2,"183":10}}],["p",{"2":{"19":2,"93":4,"181":2,"183":5}}],["pprint",{"2":{"11":1,"57":2,"58":1,"64":4,"182":12,"183":11}}],["plots",{"2":{"114":2}}],["plural",{"2":{"96":1}}],["plus",{"2":{"18":2,"93":2,"183":4}}],["please",{"2":{"9":1,"78":1,"80":2,"121":1,"177":1,"181":1,"183":1}}],["plausible",{"2":{"171":2}}],["plain",{"2":{"160":2,"182":1}}],["plaintextblog",{"2":{"177":1}}],["plaintextexplain",{"2":{"171":1}}],["plaintextextract",{"2":{"115":1}}],["plaintextnotes",{"2":{"162":1,"164":1}}],["plaintextuser",{"2":{"136":1,"138":1,"160":1}}],["plaintextusing",{"2":{"9":1}}],["plaintextoriginal",{"2":{"127":1}}],["plaintexthere",{"2":{"126":1,"128":1}}],["plaintextquery",{"2":{"125":1}}],["plaintextwrite",{"2":{"124":1}}],["plaintextwe",{"2":{"117":1,"118":1}}],["plaintextignore",{"2":{"130":1}}],["plaintexti",{"2":{"112":1}}],["plaintextyour",{"2":{"177":1}}],["plaintextyou",{"2":{"112":1,"114":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"136":1,"137":1,"138":1,"144":1,"145":1,"147":1,"150":1,"157":1,"159":1,"161":1,"163":
1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"171":1,"173":1,"174":1,"175":1,"178":1}}],["plaintextact",{"2":{"110":1,"117":1,"118":1,"140":1,"141":1,"142":1,"152":1,"153":1,"158":1,"160":1,"162":1,"164":1}}],["plaintextaimessage",{"2":{"13":1}}],["plaintext2",{"2":{"97":3,"107":2}}],["plaintext>",{"2":{"13":1}}],["plaintext",{"2":{"12":1,"58":1,"71":2,"72":1,"110":1,"114":1,"115":1,"120":1,"121":1,"122":1,"130":1,"131":2,"132":2,"134":2,"137":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"149":2,"150":1,"152":1,"153":1,"154":1,"156":1,"157":1,"158":1,"159":1,"161":1,"163":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"173":1,"174":1,"175":1,"178":1}}],["placing",{"2":{"130":1,"131":1}}],["places",{"2":{"108":1,"114":1,"183":2}}],["place",{"2":{"22":1,"55":1,"60":1,"102":1,"153":1,"181":2,"183":4}}],["placeholder",{"2":{"9":1,"64":1,"94":1,"105":1,"107":1,"137":1,"144":1,"145":1,"147":1,"150":1,"182":1,"183":6}}],["placeholders",{"0":{"72":1},"2":{"9":4,"14":2,"100":1,"105":1,"107":4,"108":2,"110":2,"112":2,"114":2,"115":2,"117":2,"118":2,"120":2,"121":2,"122":2,"124":2,"125":2,"126":2,"127":2,"128":2,"130":2,"131":2,"132":2,"134":2,"136":2,"137":1,"138":2,"140":2,"141":2,"142":2,"144":1,"145":1,"147":1,"149":2,"150":1,"152":2,"153":2,"154":2,"157":2,"158":2,"159":2,"160":2,"161":2,"162":2,"163":2,"164":2,"165":2,"166":2,"167":2,"168":2,"169":2,"170":2,"171":3,"173":2,"174":2,"175":2,"177":2,"178":2,"183":8}}],["platform",{"2":{"80":1,"183":2}}],["plant",{"2":{"19":2,"93":1,"183":8}}],["plan",{"2":{"17":1,"24":1,"80":1,"153":1,"183":2}}],["playful",{"2":{"177":1}}],["plays",{"2":{"153":1}}],["playing",{"2":{"152":1}}],["play",{"2":{"8":1,"22":1,"54":1,"55":2,"181":2}}],["pkgdir",{"2":{"9":1}}],["pkg",{"2":{"9":2,"32":2,"55":1,"70":2,"78":1,"183":2}}],["pesona",{"2":{"162":1}}],["penicillin",{"2":{"120":2}}],["perched",{"2":{"183":1}}],["permanently",{"2":{"183":1}}],["permanent",{"2":{"183":1}}],["permutation",{"2":{"182":12,"183":7}}],["persistent",{"2":{"183":1}}],["persistently",{"2":{"90":1}}],["persist",{"2":{"183":1}}],["persists",{"2":{"85":1}}],["personality",{"2":{"183":1}}],["personally",{"2":{"97":1}}],["personal",{"2":{"89":1,"183":2}}],["persona",{"0":{"151":1},"1":{"152":1,"153":1,"154":1},"2":{"9":1,"149":1,"150":1,"161":1,"162":4,"164":1,"183":5}}],["personas",{"2":{"9":1}}],["person",{"2":{"7":2,"20":1,"183":5}}],["periods",{"2":{"182":1}}],["period",{"2":{"79":1}}],["perfectly",{"2":{"121":1}}],["perfect",{"2":{"67":1,"108":1,"124":1,"183":3}}],["performance",{"2":{"15":1,"58":2,"127":1,"142":1,"168":1,"169":1,"182":2,"183":1}}],["perform",{"2":{"7":1}}],["per",{"2":{"64":1,"79":5,"82":3,"108":4,"114":1,"181":1,"182":4,"183":9}}],["perhaps",{"2":{"41":1}}],["perplexity",{"2":{"24":1,"27":1}}],["people",{"2":{"7":4,"72":1,"152":1,"183":2}}],["push",{"2":{"183":6}}],["punctuation",{"2":{"182":2}}],["puppy",{"2":{"158":1}}],["pure",{"2":{"126":1}}],["purposes",{"2":{"97":1,"107":1}}],["purpose",{"2":{"5":2,"6":1,"7":6,"11":2,"105":1,"106":2,"108":1,"140":2,"162":6,"164":6,"167":1,"175":1,"183":1}}],["published",{"2":{"58":1,"114":1}}],["pull",{"2":{"37":1,"89":2}}],["put",{"2":{"2":1,"55":1,"79":1,"108":1,"181":1,"183":1}}],["pipe",{"2":{"181":1,"183":1}}],["pipelines",{"2":{"55":1,"180":1,"181":2}}],["pipeline",{"2":{"6":1,"56":1,"58":2,"60":2,"62":1,"64":10,"97":3,"182":16}}],["pinpoint",{"2":{"153":1}}],["pinpointing",{"2":{"130":1}}],["pirate",{"2":{"94":4,"183":6}}],["piece",{"2":{"64":1,"182":3}}],["pieces",{"2":{"2":1,"63":1}}],["picking",{"2":{"144":1}}],
["pick",{"2":{"8":1,"55":1,"102":1,"108":1,"136":1,"138":1,"181":1,"183":2}}],["picture",{"2":{"5":2,"6":1,"7":2}}],["pounds",{"2":{"183":1}}],["port",{"2":{"183":5}}],["porsche",{"2":{"93":2,"108":2}}],["pop",{"2":{"181":1}}],["popular",{"2":{"88":1}}],["population=",{"2":{"72":1}}],["population",{"2":{"71":1,"72":4}}],["points",{"2":{"121":1,"130":4,"140":1,"141":1,"152":9,"153":4,"160":1,"161":1,"163":1,"170":1,"177":1,"181":1,"182":1,"183":2}}],["point",{"2":{"60":1,"64":1,"94":2,"108":1,"120":1,"130":1,"152":2,"153":1,"160":2,"182":1,"183":2}}],["pose",{"2":{"153":1}}],["positive",{"2":{"82":1,"160":1,"167":1,"171":1,"175":1}}],["positions1",{"2":{"182":3}}],["positions3",{"2":{"182":2}}],["positions2",{"2":{"182":5}}],["positions",{"2":{"182":32,"183":10}}],["position",{"2":{"67":1,"182":9,"183":1}}],["pos",{"2":{"67":4,"182":1,"183":4}}],["post",{"2":{"164":2,"177":5,"182":1,"183":6}}],["posts",{"2":{"140":1,"164":1}}],["postorderdfs",{"2":{"55":3,"181":8}}],["postprocessor",{"2":{"64":8,"182":11}}],["postprocessing",{"0":{"47":1},"2":{"47":1,"63":1,"64":1,"182":3}}],["postprocess",{"2":{"45":1,"61":1,"64":3,"182":5,"183":7}}],["possibly",{"2":{"182":1}}],["possible",{"2":{"7":1,"55":1,"67":4,"124":1,"125":2,"152":1,"167":1,"175":1,"178":1,"181":1,"182":4,"183":6}}],["possess",{"2":{"13":1}}],["possessions",{"2":{"13":1,"35":1}}],["powerful",{"2":{"16":1,"22":1,"55":1,"56":1,"96":1,"108":2,"126":1,"150":1,"181":1,"182":1,"183":1}}],["powered",{"2":{"15":1,"72":1}}],["power",{"2":{"13":1,"114":1}}],["poor",{"2":{"4":1}}],["potentially",{"2":{"63":1,"64":2,"182":2,"183":2}}],["potential",{"2":{"2":2,"55":1,"63":1,"127":1,"171":1,"181":2}}],["pt",{"2":{"1":1,"9":6,"16":2,"23":2,"24":3,"25":1,"26":2,"27":1,"28":1,"29":4,"30":2,"31":2,"32":1,"35":2,"37":5,"39":1,"41":2,"42":6,"47":1,"55":6,"64":10,"87":1,"90":5,"91":4,"94":2,"96":2,"97":4,"107":12,"108":3,"181":19,"182":30,"183":77}}],["palm",{"2":{"183":1}}],["packed",{"2":{"182":8}}],["pack",{"2":{"182":9,"183":1}}],["packages",{"2":{"9":2,"14":2,"55":1,"58":1,"114":1,"168":1,"169":1,"180":1,"182":2,"183":4}}],["package",{"2":{"1":1,"2":1,"9":10,"11":1,"32":1,"37":1,"55":1,"56":1,"70":1,"78":1,"99":2,"102":1,"106":1,"114":1,"181":1,"182":5,"183":9}}],["payout",{"2":{"171":1}}],["payload",{"2":{"108":1}}],["paying",{"0":{"82":1},"2":{"82":1}}],["pay",{"2":{"79":1,"82":3}}],["painting",{"2":{"67":1,"183":1}}],["pair",{"0":{"5":1,"6":1},"2":{"182":2,"183":5}}],["pairs",{"0":{"4":1},"2":{"3":1,"6":1,"57":1,"64":3,"182":3,"183":3}}],["padding",{"2":{"67":1,"182":1,"183":6}}],["padawan",{"2":{"13":1,"35":1,"183":1}}],["pauses",{"2":{"41":1}}],["paper",{"2":{"22":2,"55":2,"125":1,"130":1,"181":2,"182":1}}],["page",{"2":{"8":1,"9":1,"32":1,"47":1,"69":1,"76":1,"77":1,"183":2}}],["pages",{"2":{"2":3,"12":1,"178":1}}],["paris",{"2":{"71":1,"182":3}}],["parents",{"2":{"181":1}}],["parent",{"2":{"64":1,"98":1,"181":4,"182":41,"183":17}}],["param2",{"2":{"182":1}}],["param1",{"2":{"182":1}}],["parameter",{"2":{"64":1,"107":1,"108":3,"181":1,"182":5,"183":5}}],["parameters",{"2":{"2":1,"6":2,"7":3,"11":2,"55":1,"64":16,"91":1,"106":2,"108":5,"181":3,"182":29,"183":15}}],["paragraphs",{"2":{"67":2,"95":1,"183":2}}],["paragraph",{"2":{"67":3,"95":1,"182":2,"183":3}}],["parallelism",{"2":{"58":1}}],["parallel",{"2":{"55":1,"58":7,"64":1,"181":1,"182":6}}],["paralellize",{"2":{"46":1}}],["parts",{"2":{"57":1,"141":1,"153":1,"181":1,"183":1}}],["particular",{"2":{"64":1,"65":1,"171":1,"182":1,"183":1}}],["particularly",{"2":{"55":1,"67":2,"144":1,"145":1,
"147":1,"181":2,"183":4}}],["partially",{"2":{"183":1}}],["partial",{"2":{"9":2,"58":1,"97":2,"182":2,"183":2}}],["part",{"2":{"13":1,"55":1,"63":2,"64":2,"152":2,"181":1,"182":2,"183":6}}],["parseable",{"2":{"93":2}}],["parses",{"2":{"55":1,"183":1}}],["parser",{"2":{"55":1,"183":1}}],["parse",{"2":{"21":1,"55":2,"82":1,"181":1,"183":7}}],["parsed",{"2":{"8":1,"55":6,"108":1,"183":8}}],["parsing",{"2":{"7":1,"20":1,"55":6,"181":1,"183":8}}],["patience",{"2":{"41":1}}],["pathways",{"2":{"167":1,"175":1}}],["path=",{"2":{"21":1,"43":1,"183":5}}],["path",{"2":{"13":1,"35":2,"41":1,"43":2,"183":33}}],["paths",{"2":{"4":1,"64":4,"91":1,"182":6,"183":2}}],["patterns",{"2":{"183":3}}],["pattern",{"2":{"1":1,"12":1,"183":4}}],["past",{"2":{"92":2,"130":5,"181":2,"183":1}}],["paste",{"2":{"2":1}}],["passage",{"2":{"124":3,"125":3,"182":2}}],["passages",{"2":{"112":7,"182":1}}],["passtroughtagger",{"2":{"64":1,"182":1}}],["passthroughtagger",{"2":{"64":3,"182":6,"183":1}}],["passthrough",{"2":{"63":1,"182":3,"183":1}}],["pass",{"2":{"55":1,"60":1,"62":3,"64":4,"92":1,"181":1,"182":22,"183":10}}],["passes",{"2":{"22":1,"54":1,"182":4}}],["passed",{"2":{"6":1,"7":4,"55":2,"58":1,"60":2,"64":9,"93":1,"181":8,"182":9,"183":8}}],["passing",{"0":{"62":1},"2":{"0":1,"55":1,"181":1}}],["pragmatic",{"2":{"140":1,"141":1,"142":1}}],["practics",{"2":{"183":1}}],["practically",{"2":{"181":1}}],["practical",{"2":{"72":1,"108":1,"161":1,"163":1,"170":1,"182":1,"183":1}}],["practices",{"2":{"58":6,"64":1,"142":1,"182":6}}],["practice",{"2":{"4":1,"7":1,"107":1}}],["pristine",{"2":{"183":1}}],["primary",{"2":{"183":1}}],["pricing",{"2":{"82":1}}],["price",{"2":{"82":1}}],["privacy",{"0":{"76":1},"2":{"74":1}}],["principles",{"2":{"60":1}}],["printed",{"2":{"64":1,"182":4,"183":2}}],["printstyled",{"2":{"182":1}}],["prints",{"2":{"55":1,"64":1,"181":1,"182":3,"183":2}}],["println",{"2":{"55":5,"181":2,"183":7}}],["printing",{"2":{"11":1,"57":1,"58":2,"182":1,"183":1}}],["print",{"2":{"9":1,"52":3,"55":6,"58":1,"64":4,"67":1,"181":8,"182":21,"183":37}}],["priority",{"2":{"183":1}}],["prioritizing",{"2":{"144":1,"145":1,"147":1}}],["prioritize",{"2":{"114":1,"120":1,"153":1,"167":1,"168":1,"169":1,"175":1,"181":2}}],["prior",{"2":{"37":1,"182":1,"183":1}}],["pr",{"2":{"9":1}}],["pretend",{"2":{"183":1}}],["prettify",{"2":{"182":1}}],["pretty",{"2":{"11":1,"55":1,"57":1,"58":2,"64":3,"97":1,"181":1,"182":7,"183":5}}],["predicts",{"2":{"171":1}}],["prediction",{"2":{"171":6}}],["predictions",{"2":{"171":2}}],["pre",{"2":{"97":1,"105":1,"182":1,"183":2}}],["prerequisites",{"0":{"69":1}}],["preprocessor",{"2":{"182":2}}],["preprocessed",{"2":{"182":1}}],["preprocess",{"2":{"182":4,"183":1}}],["prepend",{"2":{"92":1}}],["prepended",{"2":{"55":1,"183":2}}],["prepayment",{"2":{"69":1}}],["preparing",{"2":{"120":1}}],["prepare",{"2":{"61":1,"62":1,"64":1,"182":2}}],["prepared",{"2":{"55":1,"181":1}}],["preparation",{"2":{"60":1,"63":1,"182":11}}],["prefill",{"2":{"183":3}}],["prefix",{"2":{"55":3,"83":2,"183":5}}],["prefences",{"2":{"183":1}}],["prefer",{"2":{"46":1,"67":2,"97":1,"158":1,"168":1,"169":1,"183":2}}],["preferencesfor",{"2":{"183":1}}],["preferences",{"0":{"85":1},"2":{"24":2,"26":2,"42":1,"78":1,"83":1,"85":3,"90":2,"183":36}}],["preference",{"2":{"0":1,"183":5}}],["preorderdfs",{"2":{"55":1,"181":3}}],["prev",{"2":{"182":1}}],["prevent",{"2":{"12":1,"81":1,"183":1}}],["previously",{"2":{"67":1,"94":1,"95":1,"105":1,"130":1,"182":3,"183":1}}],["previous",{"2":{"9":1,"55":4,"71":1,"92":1,"103":2,"117":1,"118":1,"
130":2,"152":1,"156":1,"181":5,"183":5}}],["previews",{"2":{"183":2}}],["preview",{"2":{"9":3,"14":2,"16":2,"83":2,"94":2,"181":3,"183":10}}],["precedence",{"2":{"152":1,"156":1,"183":1}}],["preceding",{"2":{"2":1,"182":1}}],["precision",{"2":{"126":1,"152":1,"178":1}}],["precisely",{"2":{"152":2,"156":1,"160":1,"166":1}}],["precise",{"2":{"9":3,"105":1,"107":2,"150":1,"157":1,"161":1,"163":1,"165":1,"170":1,"173":1}}],["precompile",{"2":{"78":1}}],["precompiled",{"2":{"78":1}}],["precompilation",{"2":{"78":3}}],["present",{"2":{"126":1,"153":1,"154":1,"183":4}}],["preserve",{"2":{"67":1,"182":1,"183":1}}],["preserving",{"2":{"67":2,"183":2}}],["preset",{"2":{"0":1,"183":3}}],["press",{"2":{"9":2}}],["proposed",{"2":{"181":1}}],["propertynames",{"2":{"183":5}}],["property",{"2":{"182":6,"183":6}}],["properties",{"2":{"55":2,"98":1,"108":3,"181":1,"182":1,"183":8}}],["proper",{"2":{"55":1,"181":1,"182":3,"183":1}}],["properly",{"2":{"8":1}}],["professional",{"2":{"140":1,"160":1}}],["proficient",{"2":{"9":2}}],["project",{"2":{"94":2,"96":1,"183":1}}],["projects",{"2":{"85":1}}],["prototyping",{"2":{"94":1}}],["proxy",{"2":{"91":1,"181":1}}],["programming",{"2":{"58":2,"142":1,"164":1}}],["programmatically",{"2":{"55":1,"183":1}}],["programmer",{"2":{"9":4,"14":1,"104":1,"142":1,"163":1,"165":1,"166":1,"167":1,"168":1,"169":1,"173":1,"174":1,"175":1,"183":2}}],["program",{"2":{"55":2,"93":2,"181":2}}],["promising",{"2":{"55":1,"181":1}}],["prompting",{"2":{"183":1}}],["promptingtools",{"2":{"0":2,"1":4,"9":11,"11":2,"13":1,"14":2,"16":2,"20":1,"22":1,"23":1,"24":4,"25":3,"26":3,"27":1,"28":1,"29":1,"30":1,"31":1,"32":4,"37":3,"45":2,"46":1,"47":1,"48":1,"50":1,"51":2,"55":13,"56":2,"58":1,"64":6,"65":1,"66":1,"67":7,"69":1,"70":4,"78":5,"84":1,"85":3,"87":3,"88":1,"90":1,"92":4,"93":1,"94":5,"95":1,"96":1,"97":8,"98":3,"99":1,"106":1,"107":6,"108":2,"179":4,"180":3,"181":72,"182":283,"183":608}}],["promptengineerfortask",{"0":{"150":1}}],["prompt",{"0":{"94":1,"97":1,"105":1},"2":{"9":2,"11":2,"18":3,"24":2,"27":2,"31":2,"36":1,"42":3,"55":3,"60":1,"71":1,"72":1,"79":2,"90":2,"91":2,"93":2,"94":3,"97":3,"98":1,"100":1,"103":1,"105":2,"106":2,"108":8,"110":2,"112":2,"114":2,"115":2,"117":2,"118":2,"120":2,"121":2,"122":3,"124":2,"125":2,"126":2,"127":2,"128":2,"130":2,"131":2,"132":2,"134":2,"136":2,"137":2,"138":2,"140":2,"141":2,"142":2,"144":3,"145":3,"147":2,"149":5,"150":9,"152":3,"153":3,"154":1,"156":1,"157":2,"158":2,"159":2,"160":2,"161":2,"162":2,"163":2,"164":2,"165":2,"166":2,"167":2,"168":2,"169":2,"170":2,"171":2,"173":3,"174":3,"175":3,"177":3,"178":2,"181":3,"182":3,"183":265}}],["prompts",{"0":{"13":1,"14":1,"35":1,"41":1,"103":1},"2":{"9":5,"13":1,"14":2,"15":1,"18":2,"79":1,"80":1,"100":2,"103":1,"105":2,"183":10}}],["prob",{"2":{"183":4}}],["probabilities",{"2":{"183":1}}],["probability",{"2":{"171":4,"183":2}}],["probably",{"2":{"1":1,"12":2}}],["problems",{"2":{"67":2,"130":1,"166":1,"168":1,"169":1,"174":1,"183":2}}],["problem",{"2":{"41":1,"142":1,"166":1,"168":1,"169":1}}],["produce",{"2":{"36":1,"183":2}}],["production",{"2":{"180":1}}],["product",{"2":{"7":1,"17":1,"67":3,"183":5}}],["processed",{"2":{"67":1,"182":2,"183":2}}],["processes",{"2":{"64":1,"67":1,"181":1,"182":1,"183":1}}],["processor=rt",{"2":{"64":1,"182":1}}],["processor",{"2":{"64":11,"182":24}}],["process",{"2":{"22":1,"54":1,"64":2,"65":1,"79":2,"93":2,"96":2,"126":1,"128":1,"181":4,"182":8,"183":9}}],["processing",{"2":{"8":1,"58":2,"64":1,"67":1,"71":1,"93":1,"107":1,"181":1,"182":1,"183":5}}],[
"pro",{"2":{"15":1,"32":1,"33":1,"71":1,"72":2,"77":1,"183":2}}],["provide",{"2":{"2":2,"4":1,"5":2,"6":1,"7":2,"11":4,"13":1,"19":1,"20":2,"22":1,"24":2,"26":2,"30":1,"35":1,"37":1,"43":1,"49":1,"54":2,"55":11,"57":1,"58":3,"60":1,"62":1,"64":5,"72":1,"74":1,"78":1,"91":1,"92":1,"94":1,"102":1,"103":1,"105":2,"106":3,"108":6,"112":1,"114":1,"115":2,"117":3,"118":3,"120":1,"122":1,"126":1,"130":3,"138":1,"140":3,"141":4,"142":2,"149":1,"152":2,"158":1,"160":1,"162":1,"164":1,"181":16,"182":12,"183":63}}],["provides",{"2":{"2":1,"42":1,"51":1,"56":1,"65":1,"67":1,"114":1,"120":1,"153":1,"167":1,"168":1,"169":1,"175":1,"181":3,"182":2,"183":3}}],["provided",{"2":{"2":1,"7":1,"9":2,"11":3,"16":1,"18":2,"21":1,"22":2,"31":1,"52":1,"54":2,"55":7,"56":1,"58":2,"60":1,"63":3,"64":10,"66":1,"67":2,"94":1,"106":3,"107":1,"108":8,"110":2,"114":2,"115":2,"117":1,"118":1,"120":4,"121":9,"122":2,"124":2,"125":1,"127":1,"130":2,"132":1,"136":1,"137":3,"138":2,"140":1,"142":3,"144":1,"145":1,"147":1,"152":1,"154":2,"156":1,"158":2,"160":1,"162":1,"164":1,"166":2,"167":1,"171":2,"174":1,"175":2,"177":1,"178":1,"181":14,"182":42,"183":103}}],["provider",{"2":{"0":4,"27":2,"97":2,"101":1,"102":1,"107":1,"108":2,"183":1}}],["providers",{"0":{"0":1,"91":1,"101":1},"2":{"0":2,"24":1,"27":1,"64":1,"91":1,"97":1,"100":2,"101":2,"108":1,"182":4,"183":3}}],["providing",{"0":{"43":1},"2":{"0":1,"27":1,"28":1,"31":1,"43":1,"52":2,"64":2,"153":1,"181":2,"182":2,"183":1}}],["v3",{"2":{"182":3}}],["voyage",{"2":{"183":4}}],["voyager",{"2":{"75":1}}],["vocab",{"2":{"182":10}}],["vocabulary",{"2":{"64":1,"126":1,"182":8}}],["v2",{"2":{"37":1,"64":1,"182":1}}],["v1",{"2":{"28":1,"30":1,"31":1,"78":2,"182":2,"183":9}}],["v0",{"2":{"28":1,"108":1}}],["vcat",{"2":{"13":1}}],["vs",{"2":{"11":1,"55":2,"57":1,"64":1,"67":1,"100":1,"181":2,"182":2,"183":1}}],["vscodedisplay",{"2":{"9":2,"14":2,"183":4}}],["vscode",{"2":{"9":1,"12":1,"14":1,"183":2}}],["vidid",{"2":{"183":2}}],["video",{"2":{"152":3,"153":1}}],["videos",{"2":{"152":2,"153":2}}],["vibrant",{"2":{"67":1,"183":2}}],["visible",{"2":{"178":1}}],["visits",{"2":{"181":6}}],["visit",{"2":{"80":1}}],["vision",{"2":{"11":1,"106":1,"183":4}}],["visualize",{"2":{"183":1}}],["visualization",{"2":{"58":1}}],["visual",{"0":{"176":1},"1":{"177":1,"178":1},"2":{"69":1,"77":1}}],["viewers",{"2":{"183":2}}],["view",{"2":{"55":1,"181":1,"182":9,"183":2}}],["via",{"0":{"85":1},"2":{"2":1,"6":1,"13":1,"17":1,"21":1,"24":2,"26":2,"28":1,"42":1,"55":3,"58":1,"60":1,"63":1,"64":2,"66":1,"69":1,"78":1,"83":1,"89":1,"91":1,"101":1,"108":3,"110":1,"117":1,"118":1,"137":1,"144":1,"145":1,"147":1,"181":3,"182":9,"183":21}}],["vect",{"2":{"182":2}}],["vectorstore",{"2":{"128":1}}],["vectors",{"2":{"64":1,"182":5,"183":3}}],["vectorized",{"2":{"7":1}}],["vector",{"2":{"4":1,"9":3,"14":2,"17":1,"20":2,"31":1,"45":4,"47":1,"55":3,"58":1,"61":1,"64":18,"67":13,"92":1,"93":1,"94":4,"96":1,"97":4,"98":1,"102":1,"105":1,"107":4,"108":1,"181":5,"182":82,"183":175}}],["ve",{"2":{"37":1,"42":1,"79":1,"89":1,"107":1}}],["vegetable",{"2":{"18":1,"183":2}}],["verify",{"2":{"183":1}}],["verification",{"2":{"182":1}}],["versus",{"2":{"182":1}}],["version=",{"2":{"94":1,"183":1}}],["versions",{"2":{"78":1,"98":1}}],["version",{"2":{"7":1,"9":1,"14":2,"42":1,"54":1,"55":1,"58":1,"64":1,"72":1,"78":1,"89":1,"94":1,"110":1,"112":1,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"130":2,"131":1,"132":1,"134":1,"136":1,"137":1,"138":1,"140":1,"141":1,"142":1,"144":1,"
145":1,"147":1,"149":1,"150":1,"152":2,"153":1,"154":1,"157":1,"158":1,"159":1,"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"171":1,"173":1,"174":1,"175":1,"177":1,"178":1,"181":1,"182":3,"183":10}}],["verbatim",{"2":{"154":3,"156":1}}],["verbosity",{"2":{"55":2,"64":1,"181":4,"182":2}}],["verbose=2",{"2":{"181":1}}],["verbose=true",{"2":{"64":1,"182":1,"183":3}}],["verbose=false",{"2":{"55":1,"181":1}}],["verbose",{"2":{"4":1,"6":1,"7":2,"11":1,"14":1,"22":1,"54":1,"55":6,"64":16,"106":1,"181":7,"182":68,"183":42}}],["very",{"2":{"11":1,"19":2,"22":1,"66":1,"79":1,"89":1,"106":1,"121":2,"161":1,"163":1,"166":1,"168":1,"169":1,"170":1,"174":1,"182":1,"183":2}}],["vararg",{"2":{"183":2}}],["varying",{"2":{"183":1}}],["variety",{"2":{"24":1,"26":1,"140":1}}],["various",{"0":{"10":1},"1":{"11":1,"12":1,"13":1,"14":1,"15":1,"16":1,"17":1,"18":1,"19":1,"20":1,"21":1,"22":1,"23":1,"24":1},"2":{"55":1,"60":1,"72":1,"170":1,"181":1,"182":2}}],["variables",{"2":{"9":1,"13":1,"14":1,"29":1,"55":2,"70":1,"71":1,"72":1,"78":1,"84":1,"94":1,"97":1,"114":1,"168":1,"169":1,"183":38}}],["variable",{"0":{"84":1},"2":{"0":1,"30":1,"31":1,"49":1,"55":2,"69":3,"83":1,"84":3,"92":1,"97":1,"98":1,"114":1,"142":1,"171":2,"183":14}}],["vanilla",{"2":{"108":1}}],["vanished",{"2":{"67":1,"183":1}}],["vast",{"2":{"67":1,"183":1}}],["valid",{"2":{"52":1,"64":1,"181":1,"182":3,"183":1}}],["validated",{"2":{"182":1}}],["validate",{"2":{"22":2,"54":1,"167":1,"175":1}}],["validation",{"0":{"22":1},"2":{"54":1}}],["value2",{"2":{"182":1}}],["value1",{"2":{"182":1}}],["value",{"2":{"7":2,"22":1,"54":1,"55":3,"114":1,"115":1,"168":1,"169":1,"171":2,"181":8,"182":2,"183":18}}],["values",{"2":{"6":1,"7":10,"55":1,"60":1,"114":1,"171":6,"181":1,"182":6,"183":11}}],["valuable",{"2":{"1":1}}],["vllm",{"2":{"0":1,"75":1,"183":1}}],["l99",{"2":{"183":1}}],["l424",{"2":{"183":1}}],["l341",{"2":{"183":1}}],["l116",{"2":{"183":1}}],["l170",{"2":{"183":1}}],["l1007",{"2":{"183":1}}],["l1009",{"2":{"183":1}}],["l123",{"2":{"42":1}}],["lucene",{"2":{"182":2}}],["lngpt3t",{"2":{"97":1}}],["l244",{"2":{"182":1}}],["l215",{"2":{"182":1}}],["l288",{"2":{"67":1,"183":1}}],["l252",{"2":{"67":1,"183":1}}],["lt",{"2":{"11":3,"37":1,"50":2,"61":1,"64":4,"67":1,"69":1,"83":1,"84":1,"89":1,"91":1,"94":3,"102":2,"103":1,"104":1,"105":3,"106":3,"108":1,"179":2,"181":1,"182":31,"183":29}}],["llava",{"2":{"42":1}}],["llamaindex",{"2":{"117":1,"118":1,"125":1,"127":1}}],["llama123",{"2":{"42":3}}],["llama2",{"2":{"37":1,"39":1,"42":2,"89":1,"90":1,"183":1}}],["llama",{"0":{"28":1},"2":{"0":1,"25":1,"28":3,"29":6,"75":2,"100":1,"101":1,"183":6}}],["ll",{"2":{"22":1,"24":1,"26":1,"32":1,"34":1,"54":1,"55":1,"58":1,"80":1,"81":1,"89":1,"99":1,"108":3,"130":1,"181":1,"183":5}}],["llmtextanalysis",{"2":{"94":1}}],["llms",{"2":{"11":1,"14":1,"20":1,"64":1,"88":1,"89":1,"103":2,"106":1,"108":1,"112":1,"182":2,"183":2}}],["llm",{"2":{"9":1,"11":3,"12":1,"14":1,"20":1,"23":1,"52":2,"55":5,"62":1,"64":2,"82":1,"91":2,"100":1,"101":1,"106":4,"137":1,"181":7,"182":3,"183":11}}],["laptop",{"2":{"108":1}}],["latter",{"2":{"75":1}}],["latency",{"2":{"182":3,"183":2}}],["latest",{"2":{"9":4,"14":1,"16":1,"28":1,"58":2,"76":1,"78":1,"89":1,"110":1,"117":1,"118":1,"130":1,"165":1,"173":1,"181":1,"183":8}}],["later",{"2":{"2":1,"4":1,"7":1,"14":1,"47":1,"81":1,"108":1,"182":2,"183":2}}],["launch",{"2":{"84":1,"89":1}}],["launching",{"2":{"69":1,"84":1}}],["launched",{"2":{"37":1}}],["lament",{"2":{"67":1,"183":1}}],["langchain",{
"0":{"95":1},"2":{"67":3,"95":1,"128":1,"183":3}}],["languages",{"2":{"32":1,"58":1,"183":2}}],["language",{"2":{"9":6,"11":1,"14":3,"15":3,"22":1,"23":1,"52":1,"55":2,"58":6,"100":1,"101":1,"103":1,"106":1,"124":2,"126":1,"128":1,"164":2,"165":2,"166":2,"167":1,"168":2,"169":2,"173":2,"174":2,"175":1,"181":2,"182":3,"183":5}}],["lazily",{"2":{"55":1,"181":1}}],["lazy",{"2":{"11":4,"22":2,"52":5,"54":2,"55":5,"93":1,"106":6,"180":1,"181":20}}],["layers",{"2":{"28":1}}],["largeint",{"2":{"93":3}}],["large",{"2":{"23":1,"30":1,"55":1,"58":1,"64":1,"66":1,"67":1,"100":1,"101":1,"103":1,"167":1,"175":1,"181":2,"182":9,"183":2}}],["larger",{"2":{"7":5,"64":1,"66":1,"67":1,"72":2,"160":1,"182":1,"183":3}}],["last",{"2":{"22":3,"52":3,"54":4,"55":32,"80":1,"92":4,"93":4,"107":1,"108":2,"130":1,"181":36,"182":5,"183":40}}],["lastly",{"2":{"11":1,"55":1,"181":1,"182":1}}],["lake",{"2":{"20":2}}],["labeling",{"2":{"158":2}}],["labeled",{"2":{"154":1}}],["labels",{"2":{"19":1,"158":1,"177":1}}],["label",{"2":{"19":1,"136":3,"158":4,"171":3}}],["lawyer",{"2":{"7":4}}],["led",{"2":{"120":1}}],["leetcode",{"2":{"67":1,"183":1}}],["legend",{"2":{"58":1}}],["legacy",{"2":{"21":1,"183":2}}],["less",{"2":{"55":1,"160":1,"164":1,"181":1}}],["leveraging",{"2":{"183":2}}],["leverages",{"2":{"24":1,"26":1,"49":1,"108":1,"181":1}}],["leverage",{"2":{"14":1,"15":1,"22":1,"55":1,"58":1,"102":1,"108":1,"162":2,"164":2,"181":1,"183":2}}],["level",{"2":{"55":2,"60":3,"62":1,"64":2,"67":1,"164":1,"181":4,"182":4,"183":13}}],["leaves",{"2":{"181":2}}],["leave",{"2":{"164":1,"183":1}}],["leaving",{"2":{"67":2,"183":2}}],["leadership",{"2":{"154":1}}],["leads",{"2":{"35":1,"183":2}}],["leaf",{"2":{"64":2,"182":2}}],["least",{"2":{"63":1,"64":1,"154":1,"182":1,"183":3}}],["learn",{"2":{"183":1}}],["learned",{"2":{"13":1,"107":1}}],["learning",{"2":{"11":1,"52":1,"55":1,"58":1,"106":1,"181":1}}],["lengths",{"2":{"182":1}}],["length=20",{"2":{"67":1,"183":1}}],["length=13",{"2":{"67":1,"183":1}}],["length=10000",{"2":{"67":2,"183":2}}],["length=10",{"2":{"64":1,"67":2,"95":1,"182":3,"183":2}}],["length",{"2":{"7":1,"8":1,"9":1,"22":2,"28":1,"54":2,"55":32,"58":1,"64":4,"66":7,"67":35,"95":1,"140":1,"181":49,"182":30,"183":55}}],["left",{"2":{"6":1,"7":12,"55":1,"181":1,"183":2}}],["letters",{"2":{"177":1,"182":1}}],["letter",{"2":{"130":1,"131":1}}],["let",{"2":{"1":1,"2":2,"3":1,"5":1,"6":2,"7":2,"9":3,"13":1,"20":1,"21":1,"23":1,"24":1,"26":1,"30":1,"32":1,"37":1,"41":2,"54":1,"55":8,"58":3,"62":1,"64":1,"67":1,"79":1,"83":2,"92":1,"93":2,"94":1,"107":2,"108":7,"130":1,"131":1,"181":9,"182":5,"183":8}}],["lossless",{"2":{"182":1}}],["losses",{"2":{"181":1}}],["losing",{"2":{"41":1}}],["lot",{"2":{"65":1}}],["lower",{"2":{"55":3,"64":1,"67":1,"181":3,"182":1,"183":1}}],["lowercased",{"2":{"182":1}}],["lowercase",{"2":{"21":1,"55":2,"181":2,"182":1,"183":2}}],["low",{"2":{"55":1,"79":1,"181":1,"182":5,"183":8}}],["love",{"2":{"35":1}}],["location",{"2":{"20":3,"98":1,"183":13}}],["localserver",{"2":{"183":1}}],["localserveropenaischema",{"2":{"0":1,"183":5}}],["localpreferences",{"2":{"85":1,"183":2}}],["locally",{"2":{"25":1,"43":1,"64":3,"75":1,"100":1,"101":1,"108":1,"182":3,"183":2}}],["localhost",{"2":{"24":1,"28":1,"62":3,"64":3,"182":3,"183":7}}],["local",{"0":{"37":1},"1":{"38":1,"39":1,"40":1,"41":1,"42":1,"43":1,"44":1,"45":1,"46":1,"47":1},"2":{"0":2,"24":1,"43":1,"55":1,"74":1,"78":1,"82":1,"83":1,"90":1,"181":1,"182":1,"183":20}}],["loads",{"2":{"94":1,"183":5}}],["loading",{"2":{"64":1,"182":1,"183":1}}],["load
",{"2":{"9":6,"24":1,"26":1,"56":1,"94":10,"105":1,"108":1,"182":3,"183":35}}],["loaded",{"2":{"9":1,"84":1,"182":2,"183":2}}],["longer",{"2":{"114":1,"160":1,"182":1,"183":1}}],["longest",{"2":{"66":6,"67":17,"183":17}}],["long",{"2":{"9":1,"24":2,"26":1,"27":1,"29":1,"55":3,"79":1,"91":1,"181":4,"183":3}}],["logprobs",{"2":{"183":4}}],["logged",{"2":{"64":1,"182":1}}],["logging",{"0":{"98":1},"2":{"55":2,"64":5,"181":6,"182":16,"183":6}}],["logical",{"2":{"142":1}}],["logically",{"2":{"142":1}}],["logic",{"2":{"64":5,"67":1,"102":1,"107":1,"142":1,"181":1,"182":9,"183":9}}],["logit",{"2":{"11":1,"19":1,"106":1,"183":12}}],["logo",{"2":{"21":1,"183":2}}],["logs",{"2":{"11":1,"64":2,"106":1,"182":4,"183":3}}],["log",{"2":{"7":1,"12":1,"64":1,"98":2,"182":1,"183":33}}],["loose",{"2":{"182":1}}],["loosely",{"2":{"103":1,"105":1,"130":1}}],["look",{"2":{"64":1,"79":1,"95":1,"107":1,"182":2,"183":1}}],["looking",{"2":{"24":1,"26":1,"58":3,"181":1,"182":1}}],["looks",{"2":{"16":1,"55":1,"63":1,"105":1,"182":2,"183":4}}],["lookups",{"2":{"182":1}}],["lookup",{"2":{"2":2,"8":1,"126":1,"182":3,"183":2}}],["loop",{"2":{"7":1,"9":1,"79":1,"181":2}}],["lifting",{"2":{"108":1}}],["lifecycle",{"2":{"181":1}}],["life",{"2":{"29":1,"83":2}}],["lightweight",{"2":{"182":1}}],["light",{"2":{"71":1,"108":1,"183":4}}],["libraries",{"2":{"58":1,"168":1,"169":1}}],["library",{"2":{"37":1,"89":1}}],["living",{"2":{"35":1,"183":1}}],["limitations",{"2":{"99":1,"183":2}}],["limits",{"0":{"81":1},"2":{"77":1,"79":4,"81":1,"181":1,"183":2}}],["limited",{"2":{"67":1,"171":1,"183":3}}],["limit",{"0":{"79":1},"2":{"15":1,"79":7,"81":4,"108":1,"182":6,"183":9}}],["listened",{"2":{"67":1,"183":1}}],["listed",{"2":{"7":2,"112":2}}],["list",{"2":{"9":1,"11":1,"37":2,"50":4,"57":1,"58":1,"67":1,"89":1,"106":1,"108":1,"130":3,"136":1,"138":1,"141":1,"142":1,"153":2,"179":4,"182":12,"183":25}}],["linux",{"2":{"170":3}}],["linuxbashexpertask",{"0":{"170":1}}],["links",{"2":{"183":8}}],["link",{"2":{"9":1,"37":1,"152":1,"183":1}}],["line",{"2":{"9":1,"12":1,"28":2,"84":1,"152":1,"153":1,"182":1,"183":3}}],["lines",{"0":{"2":1},"2":{"2":1,"55":3,"67":1,"182":2,"183":9}}],["linearalgebra",{"2":{"1":2,"17":2,"47":2,"56":1,"182":5,"183":5}}],["literate",{"2":{"8":1,"9":1,"47":1}}],["likelyhood",{"2":{"183":2}}],["likely",{"2":{"11":2,"79":1,"82":1,"83":1}}],["like",{"0":{"91":1,"95":1},"2":{"2":2,"6":1,"7":2,"9":2,"13":1,"14":1,"20":1,"22":1,"25":1,"30":1,"31":1,"32":1,"35":1,"39":1,"42":2,"52":2,"54":1,"55":4,"60":1,"63":1,"64":3,"67":1,"72":1,"91":1,"92":1,"93":1,"94":5,"98":4,"108":1,"114":1,"159":1,"167":1,"168":1,"169":1,"175":1,"181":5,"182":10,"183":25}}],["kw2",{"2":{"182":2}}],["kw",{"2":{"182":3}}],["kwarg",{"2":{"11":1,"55":1,"60":3,"78":1,"83":1,"92":1,"106":1,"181":1,"182":4}}],["kwargs`",{"2":{"62":1}}],["kwargs=",{"2":{"21":1,"24":1,"27":1,"28":1,"37":2,"55":3,"58":1,"60":1,"98":1,"106":2,"108":2,"181":3,"182":2,"183":10}}],["kwargs",{"0":{"86":1},"2":{"0":2,"11":3,"22":2,"29":1,"54":2,"55":15,"58":2,"60":2,"62":22,"64":81,"91":1,"94":1,"97":1,"98":2,"106":3,"108":2,"179":2,"181":33,"182":150,"183":193}}],["king",{"2":{"50":2,"179":2}}],["kinds",{"2":{"7":3}}],["knows",{"2":{"64":1,"102":1,"182":1}}],["knowing",{"2":{"18":1}}],["known",{"2":{"11":1,"32":1,"67":1,"95":1,"106":1,"183":4}}],["knowledge",{"2":{"9":4,"14":1,"16":1,"56":2,"110":1,"117":1,"118":1,"161":1,"163":1,"165":1,"170":1,"173":1,"183":2}}],["know",{"2":{"9":2,"13":1,"24":1,"26":1,"30":2,"31":1,"79":1,"93":1,"110":2,"117":2,"118":2,"168":1,"169":1,"183
":1}}],["k=5",{"2":{"182":1}}],["k=5`",{"2":{"182":1}}],["k=100",{"2":{"64":1,"182":1}}],["k=",{"2":{"7":10}}],["k",{"2":{"2":1,"6":2,"7":2,"28":1,"37":1,"62":2,"64":7,"182":29}}],["kept",{"2":{"42":1}}],["keeping",{"2":{"181":1}}],["keeps",{"2":{"6":1,"183":2}}],["keep",{"2":{"2":1,"8":1,"22":1,"27":1,"28":1,"55":1,"114":1,"152":1,"153":1,"156":1,"181":2,"182":3,"183":1}}],["key1",{"2":{"183":1}}],["keylocal",{"2":{"183":1}}],["keypreset",{"2":{"183":1}}],["key=env",{"2":{"24":1,"26":1}}],["keywordsprocessor",{"2":{"64":1,"182":6,"183":1}}],["keywords",{"2":{"64":5,"114":1,"115":2,"126":3,"158":1,"182":27,"183":1}}],["keywordsindexer",{"2":{"64":1,"182":4,"183":1}}],["keyword",{"0":{"62":1,"86":1},"2":{"9":2,"11":5,"13":1,"15":1,"16":1,"43":2,"52":1,"55":5,"60":2,"62":2,"64":11,"72":1,"79":1,"91":1,"92":2,"94":1,"105":1,"106":5,"115":1,"126":1,"149":1,"181":11,"182":18,"183":65}}],["keys",{"2":{"7":1,"24":1,"27":1,"58":1,"107":1,"108":1,"182":10,"183":10}}],["key",{"0":{"77":1,"78":3,"84":1,"85":1,"100":1},"1":{"101":1,"102":1,"103":1,"104":1,"105":1,"106":1},"2":{"0":3,"6":1,"7":9,"8":1,"9":2,"24":5,"26":2,"27":3,"29":3,"30":1,"31":1,"32":3,"49":2,"50":4,"55":2,"64":2,"69":12,"77":3,"78":9,"83":3,"84":10,"85":5,"91":1,"99":1,"100":1,"107":1,"108":2,"124":1,"125":1,"140":1,"144":1,"145":1,"147":1,"152":3,"153":5,"158":1,"168":3,"169":3,"177":2,"179":5,"181":3,"182":17,"183":164}}],["uct",{"2":{"181":12,"183":1}}],["ultimately",{"2":{"93":1}}],["u>",{"2":{"67":1,"182":1,"183":6}}],["u>promptingtools",{"2":{"67":1,"182":1,"183":6}}],["uint16",{"2":{"181":1}}],["uint64",{"2":{"45":2,"182":7}}],["uint8",{"2":{"45":1}}],["utils",{"2":{"67":1,"183":1}}],["utilized",{"2":{"114":1,"183":1}}],["utilizes",{"2":{"0":1}}],["utilizing",{"2":{"58":1}}],["utility",{"2":{"52":2,"66":1,"95":1,"107":1,"153":1,"183":6}}],["utilities",{"0":{"65":1},"1":{"66":1,"67":1},"2":{"22":1,"51":1,"52":2,"54":1,"55":1,"56":1,"64":1,"65":1,"66":1,"67":2,"181":2,"182":2}}],["ut",{"2":{"20":1}}],["untyped",{"2":{"183":1}}],["until",{"2":{"22":3,"54":2,"55":2,"79":1,"181":1,"183":1}}],["unhealthy",{"2":{"183":1}}],["unable",{"2":{"183":1}}],["unanswered",{"2":{"67":1,"183":1}}],["unpack",{"2":{"182":4}}],["unbiased",{"2":{"158":1}}],["unchanged",{"2":{"182":1,"183":1}}],["unclear",{"2":{"141":2,"182":1}}],["uncommon",{"2":{"126":1}}],["unfortunately",{"2":{"108":2}}],["unwrapping",{"2":{"183":2}}],["unwraps",{"2":{"181":1,"183":1}}],["unwrap",{"2":{"98":1,"181":2,"183":11}}],["unnecessary",{"2":{"95":1,"120":1,"128":1}}],["unexpected",{"2":{"81":1}}],["unexported",{"2":{"51":1,"56":1,"87":1}}],["unlock",{"2":{"106":1}}],["unlike",{"2":{"80":1,"182":1}}],["unless",{"2":{"67":1,"83":1,"160":2,"168":2,"169":2,"183":5}}],["unspecified",{"2":{"183":6}}],["unspoken",{"2":{"67":1,"183":1}}],["unsuccessfully",{"2":{"55":1,"183":1}}],["unsafe",{"2":{"55":7,"181":2,"183":7}}],["unusable",{"2":{"37":1}}],["unique",{"2":{"64":1,"114":1,"181":1,"182":8,"183":10}}],["universal",{"2":{"55":1,"181":1}}],["union",{"2":{"20":3,"31":1,"55":11,"64":4,"67":1,"108":1,"181":17,"182":35,"183":138}}],["units",{"2":{"182":1}}],["unitrange",{"2":{"45":1}}],["unit",{"2":{"20":2,"82":1,"100":1,"104":1,"130":1,"167":2,"175":2,"182":5,"183":1}}],["unicode",{"2":{"1":2,"56":1,"182":3}}],["unknown",{"2":{"18":3,"137":2,"183":5}}],["un",{"2":{"9":1,"37":1,"183":1}}],["underscores",{"2":{"182":1}}],["understood",{"2":{"97":1,"142":1,"183":1}}],["understandable",{"2":{"121":1,"183":1}}],["understand",{"2":{"52":1,"55":1,"64":1,"99":1,"140":1,"142":1,"152":1,"
153":1,"164":1,"182":1,"183":2}}],["understanding",{"0":{"86":1},"2":{"41":1,"99":1,"120":1,"140":1}}],["underlying",{"2":{"9":1,"11":1,"52":1,"55":1,"56":1,"106":1,"108":1,"158":1,"181":2,"182":5,"183":2}}],["under",{"2":{"2":1,"19":1,"24":1,"26":1,"28":1,"52":1,"58":1,"67":1,"94":2,"99":1,"107":2,"108":1,"183":7}}],["updating",{"2":{"183":1}}],["updates",{"2":{"58":1,"181":2,"182":1,"183":2}}],["updated",{"2":{"55":1,"181":4,"183":5}}],["update",{"2":{"21":1,"55":1,"78":1,"181":1,"182":1,"183":7}}],["upto",{"2":{"183":2}}],["upfront",{"2":{"62":1,"64":1,"182":1}}],["uppercase",{"2":{"182":1}}],["uppercased",{"2":{"114":1}}],["upper",{"2":{"55":2,"181":4}}],["uploads",{"2":{"21":1,"183":2}}],["upon",{"2":{"13":1,"55":3,"75":1,"153":1,"181":2,"183":2}}],["up",{"2":{"1":1,"8":1,"12":1,"16":1,"49":1,"55":2,"58":1,"63":1,"77":1,"80":1,"92":1,"93":1,"105":1,"107":1,"108":3,"110":1,"117":1,"118":1,"160":1,"181":3,"182":5,"183":4}}],["usable",{"2":{"100":1,"105":1}}],["usage",{"2":{"22":1,"36":1,"67":1,"76":1,"142":1,"181":1,"182":1,"183":5}}],["usd",{"2":{"79":1}}],["usually",{"2":{"55":1,"78":1,"105":1,"181":2,"183":4}}],["usual",{"2":{"29":1,"30":2,"31":2}}],["us",{"2":{"11":1,"22":2,"52":2,"54":2,"55":1,"58":1,"76":1,"106":1,"181":1}}],["using",{"0":{"9":1,"23":1,"24":1,"26":1,"27":1,"28":1,"29":1,"30":1,"31":1,"47":1,"72":1,"91":1},"2":{"1":4,"5":1,"7":2,"8":2,"9":4,"11":1,"24":1,"27":1,"37":1,"42":1,"47":1,"56":2,"57":1,"58":4,"63":3,"64":7,"67":6,"69":1,"70":2,"87":1,"90":1,"94":1,"95":1,"106":1,"108":2,"112":1,"114":1,"120":1,"130":1,"153":4,"167":2,"171":1,"174":1,"175":2,"181":8,"182":40,"183":39}}],["uses",{"2":{"11":2,"12":1,"14":1,"28":1,"52":1,"64":1,"95":1,"106":2,"131":1,"182":34,"183":9}}],["useful",{"2":{"11":1,"16":1,"18":1,"22":1,"54":1,"55":4,"64":4,"67":3,"89":1,"106":2,"117":2,"118":1,"144":1,"145":1,"168":1,"169":1,"173":1,"174":1,"175":1,"181":2,"182":16,"183":16}}],["users",{"2":{"164":1,"183":1}}],["user=",{"2":{"94":1,"98":1,"105":1,"183":3}}],["usermessagewithimages",{"2":{"104":1,"183":7}}],["usermessage",{"2":{"9":5,"11":1,"13":4,"35":1,"41":1,"52":1,"55":2,"92":2,"94":2,"97":3,"100":2,"104":2,"105":2,"106":1,"107":2,"181":14,"183":35}}],["user",{"2":{"9":1,"11":3,"13":2,"14":1,"16":1,"18":1,"35":1,"36":1,"41":1,"52":1,"55":10,"56":1,"63":1,"83":1,"94":3,"97":3,"100":1,"102":1,"104":3,"105":1,"106":4,"107":1,"108":5,"110":1,"112":1,"114":2,"115":1,"117":1,"118":1,"120":2,"121":4,"122":2,"124":2,"125":2,"126":3,"127":1,"128":3,"130":6,"131":2,"132":1,"134":2,"136":3,"137":1,"138":3,"140":10,"141":6,"142":11,"144":4,"145":4,"147":4,"149":5,"150":3,"152":2,"153":2,"156":2,"157":1,"158":2,"159":1,"160":6,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":5,"168":2,"169":2,"170":1,"171":1,"173":1,"174":1,"175":9,"177":1,"178":1,"181":13,"183":68}}],["used",{"2":{"6":2,"7":1,"16":4,"17":1,"18":2,"19":1,"55":2,"64":6,"67":4,"76":2,"92":1,"98":1,"100":1,"130":1,"131":1,"171":1,"181":11,"182":25,"183":97}}],["use",{"2":{"0":1,"1":1,"2":5,"7":2,"8":3,"9":8,"11":3,"13":2,"14":4,"16":2,"17":1,"18":1,"19":3,"20":2,"21":1,"22":4,"23":2,"24":3,"25":1,"26":2,"27":1,"28":1,"29":4,"30":4,"31":4,"32":1,"33":1,"34":1,"37":1,"39":1,"42":1,"43":1,"46":1,"49":1,"50":1,"54":4,"55":9,"56":1,"57":1,"58":1,"60":1,"61":1,"62":1,"63":1,"64":46,"67":7,"71":3,"72":4,"74":2,"76":4,"79":1,"82":2,"83":6,"84":1,"85":1,"87":1,"88":1,"90":2,"91":1,"92":3,"93":4,"94":6,"96":2,"98":2,"99":1,"102":1,"106":2,"107":3,"108":6,"130":4,"132":1,"152":9,"153":2,"158":1,"160":3,"161":1,"163":1,"164":1,"166":1,
"168":1,"169":1,"170":1,"171":1,"177":1,"179":1,"181":19,"182":85,"183":138}}],["urls",{"2":{"104":1,"183":3}}],["url=env",{"2":{"29":1}}],["url=provider",{"2":{"27":1}}],["url=",{"2":{"24":1,"28":1,"183":2}}],["url",{"2":{"0":3,"11":1,"21":2,"24":1,"27":4,"43":1,"62":3,"64":4,"91":3,"106":1,"179":1,"182":7,"183":49}}],["rules",{"2":{"182":1}}],["runtime",{"2":{"55":2,"181":2}}],["runs",{"2":{"55":2,"64":1,"78":1,"89":1,"181":1,"182":3,"183":2}}],["running",{"2":{"7":1,"9":1,"23":2,"24":1,"37":1,"58":1,"78":1,"89":3,"181":1,"183":2}}],["run",{"2":{"6":2,"7":3,"11":3,"15":1,"22":5,"23":1,"42":1,"52":3,"54":3,"55":12,"58":1,"61":1,"64":1,"69":1,"72":1,"78":1,"84":1,"85":1,"89":1,"93":2,"94":1,"97":5,"106":4,"108":2,"181":33,"182":11,"183":57}}],["ripple",{"2":{"67":1,"183":1}}],["river",{"2":{"67":1,"183":1}}],["right",{"2":{"7":11,"102":2,"108":2,"144":1,"181":1,"182":4,"183":1}}],["rm",{"2":{"9":1,"78":1}}],["rolls",{"2":{"182":1}}],["role=",{"2":{"183":8}}],["role",{"2":{"24":1,"27":1,"58":1,"97":2,"107":3,"153":1,"183":3}}],["root",{"2":{"55":3,"64":5,"181":11,"182":8,"183":1}}],["robust",{"2":{"22":1,"69":1,"74":1,"106":1}}],["robustness",{"2":{"14":1,"52":1}}],["row",{"2":{"7":3}}],["rows",{"2":{"6":1,"7":18,"182":5}}],["roughly",{"2":{"100":1,"182":3}}],["routines",{"2":{"64":2,"182":2}}],["routing",{"0":{"19":1},"2":{"19":1,"136":1,"138":1,"183":3}}],["routed",{"2":{"138":1}}],["route",{"2":{"138":1}}],["router",{"2":{"11":1,"106":1,"138":1}}],["routes",{"2":{"0":1}}],["rounds=3",{"2":{"182":1}}],["rounds=5",{"2":{"181":1}}],["rounds",{"2":{"55":1,"108":1,"130":1,"181":18,"182":1}}],["round",{"2":{"7":2,"181":5}}],["raises",{"2":{"183":1}}],["raised",{"2":{"55":1,"154":1,"183":2}}],["rainy",{"2":{"183":4}}],["rationale",{"2":{"182":2}}],["ratio",{"2":{"177":1,"181":1}}],["rating",{"2":{"6":1,"122":1,"182":3}}],["ratelimit",{"2":{"79":2}}],["rate",{"0":{"79":1},"2":{"79":3,"182":3}}],["rare",{"2":{"78":1}}],["radius",{"2":{"67":1,"182":1,"183":6}}],["raw=true",{"2":{"183":1}}],["raw",{"2":{"50":2,"55":1,"179":2,"183":1}}],["rand",{"2":{"55":2,"182":4,"183":3}}],["random",{"2":{"55":1,"181":2,"183":3}}],["range",{"2":{"30":1,"31":1,"67":1,"183":1}}],["ranked",{"2":{"182":5}}],["rankermodel",{"2":{"182":1}}],["ranker",{"2":{"182":1}}],["rankgptresult",{"2":{"182":4,"183":1}}],["rankgptreranker",{"2":{"182":4,"183":1}}],["rankgpt",{"2":{"112":3,"182":9}}],["rankings",{"2":{"182":2}}],["ranking",{"0":{"111":1},"1":{"112":1},"2":{"8":1,"60":1,"112":1,"182":15,"183":1}}],["rankanswer",{"2":{"7":1}}],["rank",{"2":{"6":1,"112":4,"182":40,"183":5}}],["ranks",{"2":{"2":1,"182":3}}],["ragjuliaqueryhyde",{"0":{"124":1}}],["ragjudgeanswerfromcontextshort",{"0":{"122":1}}],["ragjudgeanswerfromcontext",{"0":{"121":1},"2":{"6":1,"182":2}}],["ragwebsearchrefiner",{"0":{"118":1},"2":{"182":2}}],["ragextractmetadatalong",{"0":{"114":1}}],["ragextractmetadatashort",{"0":{"115":1},"2":{"64":1,"182":4}}],["ragrankgpt",{"0":{"112":1},"2":{"182":1}}],["ragresult",{"2":{"55":3,"58":3,"63":2,"64":6,"182":19,"183":1}}],["ragcreateqafromcontext",{"0":{"120":1},"2":{"64":1,"182":1}}],["ragconfig",{"2":{"58":1,"60":1,"62":1,"64":3,"182":10,"183":1}}],["ragcontext",{"2":{"6":1}}],["ragquerysimplifier",{"0":{"128":1}}],["ragquerykeywordexpander",{"0":{"126":1}}],["ragqueryoptimizer",{"0":{"127":1},"2":{"64":1,"182":3}}],["ragqueryhyde",{"0":{"125":1},"2":{"62":3,"64":1,"182":4}}],["ragdetails",{"2":{"64":1,"182":1}}],["raganswerrefiner",{"0":{"117":1},"2":{"64":2,"182":4}}],["raganswerfromcontext",{"0":{"110":1},"2":
{"64":2,"182":4}}],["ragtoolsexperimentalext",{"2":{"182":1}}],["ragtools",{"0":{"1":1,"182":1},"1":{"2":1},"2":{"1":3,"11":1,"56":3,"60":1,"63":1,"64":6,"66":2,"180":2,"182":280,"183":138}}],["rag",{"0":{"1":1,"2":1,"56":1,"59":1,"61":1,"109":1},"1":{"2":1,"57":1,"58":1,"59":1,"60":2,"61":2,"62":3,"63":2,"64":1,"110":1},"2":{"1":3,"2":1,"6":1,"7":2,"8":1,"11":2,"56":3,"57":5,"58":3,"61":1,"63":1,"64":8,"106":1,"110":1,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"180":1,"182":22}}],["r",{"2":{"2":1,"22":2,"54":2,"55":1,"58":1,"107":1,"181":1,"182":2}}],["rt",{"2":{"1":1,"4":3,"6":1,"56":1,"64":3,"182":21}}],["rewritten",{"2":{"183":2}}],["rejected",{"2":{"177":1,"183":1}}],["reveal",{"2":{"171":1}}],["revisions",{"2":{"141":1}}],["revise",{"2":{"140":3,"141":3,"142":3}}],["revised",{"2":{"127":1,"130":1,"183":2}}],["reviewing",{"2":{"183":2}}],["review",{"2":{"4":1,"8":1,"52":1,"130":1,"131":1,"140":1,"141":1,"142":1,"171":1}}],["reorganization",{"2":{"140":1}}],["reuse",{"2":{"94":1}}],["recipient",{"2":{"183":4}}],["reciprocal",{"2":{"182":8,"183":2}}],["recall",{"2":{"130":1,"168":1,"169":2}}],["record",{"2":{"183":1}}],["recorded",{"2":{"64":1,"182":1,"183":1}}],["recognized",{"2":{"182":1}}],["recognizes",{"2":{"105":1}}],["recommended",{"2":{"183":2}}],["recommendation",{"2":{"67":1,"183":1}}],["recommend",{"2":{"67":2,"95":1,"183":2}}],["recursive=true",{"2":{"78":1}}],["recursively",{"2":{"67":1,"182":1,"183":1}}],["recursivecharactertextsplitter",{"0":{"95":1},"2":{"67":3,"95":1,"183":3}}],["recursive",{"2":{"66":2,"67":8,"95":3,"130":1,"182":1,"183":11}}],["receiving",{"2":{"183":3}}],["receive",{"2":{"79":1,"81":1,"182":2,"183":3}}],["received",{"2":{"11":1,"55":1,"93":2,"106":1,"181":1,"183":9}}],["recent",{"2":{"181":2,"183":2}}],["recently",{"2":{"79":1}}],["reception",{"2":{"47":1}}],["removing",{"2":{"181":2,"182":1}}],["removes",{"2":{"181":1,"183":6}}],["remove",{"2":{"55":5,"67":1,"181":4,"182":1,"183":16}}],["reminder",{"2":{"168":1,"169":1}}],["remaining",{"2":{"79":2}}],["remembers",{"2":{"183":1}}],["remembered",{"2":{"183":1}}],["remember",{"2":{"2":1,"9":1,"11":1,"35":1,"52":1,"78":1,"94":1,"103":2,"106":1,"107":1,"140":1,"141":1,"142":1,"183":4}}],["requested",{"2":{"130":1,"168":1,"169":1,"174":1,"182":4,"183":3}}],["request",{"2":{"79":2,"82":1,"92":2,"93":1,"100":1,"103":1,"105":1,"107":1,"108":3,"130":3,"131":2,"141":4,"142":6,"183":25}}],["requests",{"0":{"80":1},"2":{"50":1,"79":7,"80":3,"179":2,"183":7}}],["requirement",{"2":{"108":2}}],["requirements",{"2":{"0":1,"140":1,"141":1,"142":4,"144":1,"145":1,"147":1,"182":1,"183":1}}],["required",{"2":{"56":1,"107":2,"108":7,"150":1,"181":1,"182":6,"183":13}}],["require",{"2":{"16":1,"120":1,"183":1}}],["requires",{"2":{"0":1,"29":1,"30":1,"31":1,"64":2,"65":1,"96":1,"124":1,"141":1,"182":7,"183":11}}],["reducing",{"2":{"64":1,"182":5}}],["reduces",{"2":{"182":2}}],["reduce",{"2":{"64":1,"182":4}}],["red",{"2":{"22":1,"54":1,"55":2,"58":1,"181":2}}],["react",{"2":{"183":1}}],["reach",{"2":{"102":1,"181":1}}],["reaching",{"2":{"67":1,"140":1,"183":1}}],["reason",{"2":{"183":9}}],["reasonable",{"2":{"182":3}}],["reasonably",{"2":{"182":1}}],["reasoning",{"2":{"144":3,"171":1,"183":5}}],["real",{"2":{"181":5,"182":12}}],["really",{"2":{"20":1,"91":1,"183":4}}],["readme",{"2":{"183":1}}],["reader=",{"2":{"182":1}}],["reads",{"2":{"182":2}}],["readability",{"2":{"140":1}}],["ready",{"2":{"12":1,"89":1,"91":1,"180":1,"183":1}}],["readtimeout",{"2":{"11":1,"106":1,"183
":12}}],["read",{"2":{"4":1,"9":1,"12":1,"58":1,"80":1,"97":1,"108":5,"114":1,"122":1,"130":1,"131":1,"152":1,"154":1,"182":1,"183":1}}],["refusals",{"2":{"183":3}}],["refresh",{"2":{"130":1,"183":3}}],["ref",{"2":{"67":1,"183":1}}],["refining",{"2":{"64":2,"182":5}}],["refines",{"2":{"181":1,"182":3}}],["refined",{"2":{"117":3,"118":3,"182":1}}],["refinements",{"2":{"181":1}}],["refinement",{"0":{"116":1},"1":{"117":1,"118":1},"2":{"182":2}}],["refiner",{"2":{"64":13,"182":24}}],["refine",{"2":{"61":1,"63":2,"64":4,"117":6,"118":6,"127":1,"181":1,"182":17,"183":3}}],["reflections",{"2":{"140":1,"141":2,"142":1}}],["reflection",{"2":{"130":1,"131":1,"140":4,"141":4,"142":4}}],["reflecting",{"2":{"67":1,"130":1,"153":1,"183":1}}],["reflects",{"2":{"122":1}}],["reflect",{"2":{"13":1,"140":1,"141":1,"142":1}}],["referring",{"2":{"182":1}}],["referred",{"2":{"7":1,"29":1}}],["refers",{"2":{"11":1,"28":1,"52":1,"106":1,"181":1,"182":2,"183":2}}],["references",{"0":{"50":1,"55":1,"64":1,"67":1},"2":{"55":1,"60":1,"126":1,"181":1,"182":3}}],["reference",{"0":{"179":1,"180":1,"181":1,"182":1,"183":1},"2":{"4":1,"11":1,"86":1,"95":1,"97":1,"120":3,"182":12,"183":10}}],["refer",{"2":{"0":1,"91":2,"97":1,"130":1,"153":1,"183":6}}],["renders",{"2":{"183":5}}],["rendered",{"0":{"97":1},"2":{"97":1,"107":7,"182":1,"183":2}}],["rendering",{"2":{"14":1,"97":4,"102":1,"183":5}}],["render",{"2":{"9":2,"14":1,"97":6,"100":2,"102":1,"107":6,"183":24}}],["regarding",{"2":{"144":1,"145":1,"147":1}}],["regards",{"2":{"94":2,"183":2}}],["regardless",{"2":{"7":2}}],["region",{"2":{"32":1,"183":2}}],["regions",{"2":{"32":1,"183":3}}],["registration",{"2":{"29":1}}],["registry",{"2":{"24":1,"26":1,"37":2,"39":1,"42":1,"107":1,"183":29}}],["registers",{"2":{"183":1}}],["registering",{"2":{"183":3}}],["register",{"2":{"27":2,"28":2,"29":2,"30":2,"31":2,"42":1,"69":1,"90":2,"91":2,"98":3,"105":1,"182":1,"183":15}}],["registered",{"2":{"24":1,"26":1,"30":1,"31":1,"94":1,"183":1}}],["regenerate",{"2":{"22":1,"54":1}}],["regex",{"2":{"9":1,"20":1,"64":1,"182":6,"183":2}}],["repetition",{"2":{"120":1}}],["repeats",{"2":{"130":2}}],["repeat",{"2":{"22":1,"54":1,"130":1,"152":1,"153":1,"160":1}}],["repeated",{"2":{"22":1,"54":1,"130":1,"183":2}}],["repeatedly",{"2":{"11":1,"52":1,"106":1,"181":1}}],["repo",{"2":{"91":1}}],["report",{"2":{"183":1}}],["reports",{"2":{"64":1,"140":1,"182":1}}],["reported",{"2":{"36":1,"183":1}}],["rephrasing",{"2":{"64":6,"126":2,"127":1,"182":19}}],["rephrases",{"2":{"127":1,"128":1,"182":2}}],["rephraser",{"2":{"62":7,"64":11,"182":25}}],["rephrased",{"2":{"58":1,"63":1,"64":2,"126":1,"128":1,"182":9}}],["rephrase",{"2":{"8":1,"61":1,"62":2,"63":2,"64":1,"124":1,"125":1,"127":2,"128":1,"182":16,"183":3}}],["representation",{"2":{"182":1}}],["representative",{"2":{"7":1}}],["represented",{"2":{"182":8,"183":2}}],["represents",{"2":{"154":1,"182":1,"183":2}}],["representing",{"2":{"55":1,"64":3,"67":1,"121":1,"182":3,"183":90}}],["reply",{"2":{"55":1,"71":1,"92":1,"140":1,"141":1,"142":1,"181":1,"183":3}}],["replaces",{"2":{"182":1}}],["replaced",{"2":{"67":1,"107":1,"182":1,"183":3}}],["replacements",{"2":{"97":1,"183":6}}],["replacement",{"2":{"67":4,"107":1,"183":8}}],["replace",{"2":{"13":1,"64":1,"66":2,"67":3,"97":1,"100":1,"105":2,"107":3,"182":2,"183":5}}],["repl",{"2":{"9":2,"58":1,"72":1,"78":3,"87":1,"183":1}}],["resized",{"2":{"183":4}}],["resize",{"2":{"183":5}}],["rescore",{"2":{"182":8}}],["reserved",{"2":{"168":1,"169":1,"183":3}}],["reset",{"2":{"79":1,"181":2,"183":1}}],["reset
s",{"2":{"79":1}}],["researcher",{"2":{"154":1,"158":1}}],["research",{"2":{"58":1,"74":1,"114":1}}],["res",{"2":{"64":1,"182":1}}],["resolutions",{"2":{"183":1}}],["resolution",{"2":{"183":1}}],["resolved",{"2":{"183":1}}],["resolves",{"2":{"130":1}}],["resolve",{"2":{"64":1,"182":4}}],["resource",{"2":{"183":3}}],["resources",{"2":{"24":1,"26":1,"58":1,"69":2,"76":1,"77":1,"81":1,"82":1,"84":2}}],["resp",{"2":{"183":3}}],["respect",{"2":{"182":1}}],["respective",{"2":{"182":1}}],["respectively",{"2":{"55":1,"60":1,"67":1,"181":2,"183":1}}],["respond",{"2":{"11":1,"55":2,"92":1,"106":1,"112":1,"127":1,"136":2,"138":2,"181":3,"183":2}}],["responses",{"0":{"93":1},"2":{"9":1,"15":1,"93":1,"107":1,"141":1,"154":10,"156":2,"181":2,"183":13}}],["response",{"2":{"7":1,"11":5,"52":1,"60":1,"63":5,"64":5,"71":1,"79":1,"82":1,"92":4,"93":14,"104":2,"106":5,"107":5,"108":8,"121":1,"130":1,"140":1,"141":2,"142":1,"167":1,"179":1,"181":1,"182":8,"183":85}}],["restrictive",{"2":{"168":1,"169":1}}],["restricted",{"2":{"6":1,"7":1}}],["restart",{"2":{"42":1,"55":1,"181":1}}],["rest",{"2":{"20":1,"50":1,"70":1,"103":1,"179":1,"181":1}}],["resulting",{"2":{"182":5}}],["results",{"2":{"7":10,"14":1,"20":1,"50":8,"55":2,"58":1,"63":1,"112":1,"117":1,"118":12,"124":1,"125":1,"127":2,"128":1,"131":2,"132":1,"179":8,"181":6,"182":16,"183":2}}],["result",{"2":{"2":1,"7":1,"55":3,"57":1,"58":7,"61":1,"62":2,"64":22,"106":2,"108":8,"181":9,"182":70,"183":30}}],["retain",{"2":{"181":1,"183":2}}],["retrive",{"2":{"60":1,"182":1}}],["retries=3",{"2":{"108":1}}],["retries=2",{"2":{"93":1}}],["retries`",{"2":{"55":1,"181":1}}],["retries",{"2":{"22":6,"54":3,"55":22,"108":2,"181":26,"183":12}}],["retrieving",{"2":{"127":1,"182":1}}],["retrievable",{"2":{"64":1,"182":1}}],["retrieval",{"0":{"1":1},"1":{"2":1},"2":{"1":1,"6":3,"7":5,"56":1,"60":2,"63":2,"64":13,"126":3,"128":1,"180":1,"182":25,"183":2}}],["retrieves",{"2":{"64":1,"182":1}}],["retriever=advancedretriever",{"2":{"64":1,"182":1}}],["retriever",{"2":{"61":1,"62":6,"64":35,"182":44}}],["retrieved",{"2":{"57":1,"60":1,"63":1,"64":1,"182":3}}],["retrieve",{"2":{"11":2,"57":2,"58":4,"60":2,"61":3,"62":1,"63":1,"64":14,"105":1,"182":21,"183":1}}],["retrying",{"2":{"22":1,"52":2,"55":7,"93":2,"108":1,"181":7}}],["retry",{"2":{"22":3,"52":1,"54":3,"55":15,"108":6,"181":22,"183":12}}],["retryconfig",{"2":{"22":3,"52":1,"54":3,"55":10,"181":14,"183":1}}],["returning",{"2":{"182":1,"183":2}}],["returned",{"2":{"71":1,"108":2,"181":1,"182":3,"183":15}}],["returns",{"2":{"7":7,"11":5,"19":3,"55":12,"64":11,"66":1,"67":5,"78":1,"106":5,"107":2,"140":1,"141":1,"142":1,"179":1,"181":16,"182":47,"183":78}}],["return",{"2":{"2":1,"6":2,"7":1,"9":1,"11":4,"19":1,"20":4,"31":1,"50":1,"55":6,"58":3,"60":2,"64":11,"67":1,"82":1,"92":5,"93":5,"97":4,"98":1,"106":5,"108":11,"117":2,"118":3,"179":1,"181":13,"182":33,"183":106}}],["reranking",{"2":{"63":1,"64":8,"182":16}}],["reranker",{"2":{"60":2,"64":8,"182":19}}],["reranked",{"2":{"58":1,"182":5}}],["rerank",{"2":{"2":2,"8":2,"60":3,"61":1,"63":2,"64":5,"182":28,"183":2}}],["reload",{"2":{"94":3,"183":2}}],["relentless",{"2":{"67":1,"183":1}}],["releases",{"2":{"180":1}}],["release",{"2":{"35":1}}],["relevancy",{"2":{"112":1,"183":1}}],["relevance",{"2":{"6":1,"112":2,"121":2,"140":1,"182":8}}],["relevant",{"2":{"2":1,"57":1,"58":1,"60":1,"61":2,"63":3,"64":7,"70":1,"112":1,"114":1,"118":1,"121":2,"122":1,"126":3,"127":1,"128":1,"150":1,"152":1,"153":1,"181":2,"182":11,"183":1}}],["related",{"2":{"14":1,"80":1,"120":1,"126":2,"15
4":1,"182":2,"183":2}}],["relational",{"2":{"7":1}}],["rely",{"2":{"0":1}}],["re",{"2":{"1":1,"2":3,"7":1,"8":1,"9":11,"11":1,"13":1,"21":1,"23":2,"24":1,"26":1,"29":1,"31":1,"35":1,"41":1,"42":1,"52":1,"58":1,"60":2,"62":1,"64":1,"67":2,"78":1,"80":1,"83":1,"89":2,"98":2,"100":1,"104":1,"105":4,"106":1,"107":5,"108":3,"112":1,"114":1,"121":1,"122":1,"124":1,"131":1,"157":2,"161":1,"163":1,"164":1,"165":2,"170":1,"171":1,"173":2,"181":1,"182":16,"183":16}}],["gsk",{"2":{"183":1}}],["ggi",{"2":{"183":3}}],["gguf",{"2":{"28":1}}],["gnarled",{"2":{"67":1,"183":1}}],["glossy",{"2":{"183":1}}],["globally",{"2":{"52":1}}],["global",{"2":{"0":1,"42":1,"55":1,"183":3}}],["glittering",{"2":{"67":1,"183":1}}],["glasses",{"2":{"39":1}}],["glad",{"2":{"31":1}}],["gpu=99",{"2":{"37":2}}],["gpu",{"2":{"28":1,"37":1}}],["gpt4o",{"2":{"182":3}}],["gpt4v",{"2":{"21":2,"183":5}}],["gpt4",{"2":{"16":1,"71":1,"72":1,"183":5}}],["gpt4t",{"2":{"6":1,"7":2,"16":3,"18":1,"22":1,"54":1,"71":1,"182":2,"183":5}}],["gpt35",{"2":{"183":2}}],["gpt3t",{"2":{"107":1}}],["gpt3",{"2":{"16":1,"107":1,"183":5}}],["gpt",{"2":{"6":1,"7":1,"16":5,"71":2,"72":1,"98":4,"182":5,"183":21}}],["guidance",{"2":{"108":1}}],["guidelines>",{"2":{"175":2}}],["guidelines",{"2":{"140":1,"153":1,"160":3,"167":2,"168":1,"169":1,"175":1}}],["guides",{"2":{"80":1,"183":1}}],["guide",{"0":{"89":1},"2":{"23":1,"41":1,"55":2,"58":2,"69":1,"74":1,"84":2,"167":1,"175":1,"181":3,"183":2}}],["guarantees",{"2":{"93":1}}],["guardian",{"2":{"67":1,"183":1}}],["guardrails",{"2":{"55":1,"181":1}}],["guessed",{"2":{"55":1,"181":1}}],["guesser",{"2":{"55":3,"181":3}}],["guesses",{"2":{"22":1,"54":1,"55":2,"181":2}}],["guess",{"2":{"22":1,"54":2,"55":31,"181":31}}],["guessing",{"2":{"22":1,"54":2,"55":1,"181":1}}],["g",{"2":{"20":1,"55":2,"112":1,"136":1,"138":1,"141":1,"183":8}}],["giraffe",{"2":{"183":7}}],["github",{"2":{"67":1,"69":1,"78":1,"84":1,"112":1,"182":5,"183":6}}],["gitignore",{"2":{"12":3}}],["given",{"2":{"20":2,"57":1,"64":2,"67":4,"79":1,"117":2,"118":2,"127":1,"136":2,"138":2,"141":1,"150":1,"154":1,"158":1,"162":1,"164":1,"166":1,"177":1,"181":5,"182":8,"183":34}}],["give",{"2":{"9":1,"55":1,"75":1,"93":2,"181":1,"182":1}}],["gives",{"2":{"5":1,"14":1,"100":1,"101":1,"117":1,"118":1,"183":2}}],["grammer",{"2":{"183":1}}],["grammatical",{"2":{"140":1}}],["grammar",{"2":{"108":1,"140":1,"183":1}}],["gracefully",{"2":{"183":6}}],["grasp",{"2":{"127":1}}],["granularity",{"2":{"58":1}}],["grab",{"2":{"55":1,"181":1}}],["gratefully",{"2":{"13":1}}],["grins",{"2":{"41":2}}],["group",{"2":{"93":1,"182":4,"183":1}}],["grow",{"2":{"13":1}}],["groq",{"2":{"0":1,"183":6}}],["groqopenaischema",{"2":{"0":1,"183":2}}],["greater",{"2":{"182":1,"183":1}}],["greatingpirate",{"2":{"94":5,"183":7}}],["great",{"2":{"9":2,"12":1,"67":2,"71":1,"159":2,"183":2}}],["gt",{"2":{"7":14,"11":3,"16":1,"22":3,"37":1,"42":2,"54":2,"55":4,"61":16,"64":9,"67":1,"69":1,"72":2,"83":1,"84":1,"85":1,"89":1,"90":2,"94":3,"100":10,"102":1,"105":3,"106":3,"107":2,"108":5,"181":7,"182":15,"183":26}}],["gamma",{"2":{"181":4,"183":1}}],["game",{"2":{"22":1,"54":1,"55":2,"171":1,"181":2}}],["gaps",{"2":{"160":1}}],["gaze",{"2":{"67":1,"183":1}}],["gave",{"2":{"13":1}}],["gauge",{"2":{"9":1}}],["gain",{"2":{"6":1}}],["garbage",{"2":{"2":2}}],["goes",{"2":{"183":1}}],["goals",{"2":{"140":3,"141":1}}],["goal",{"2":{"127":1,"130":1,"183":2}}],["going",{"2":{"62":1,"108":1}}],["got",{"2":{"55":2,"181":2,"183":1}}],["gotchas",{"0":{"36":1},"2":{"55":1,"181":1}}],["good",{"2":{"4":1,"8":1,"9
":2,"14":1,"55":1,"81":1,"93":1,"150":1,"167":1,"175":1,"181":1,"182":5,"183":3}}],["googlegenaipromptingtoolsext",{"2":{"183":1}}],["googlegenai",{"2":{"32":2,"183":1}}],["google",{"0":{"32":1},"1":{"33":1,"34":1,"35":1,"36":1},"2":{"0":1,"12":1,"32":3,"36":1,"75":1,"114":1,"183":9}}],["googleschema",{"2":{"0":1,"183":2}}],["golden",{"2":{"4":1,"8":1}}],["go",{"2":{"2":1,"13":1,"41":3,"64":1,"69":2,"77":1,"81":1,"89":2,"105":1,"167":1,"175":1,"182":2}}],["germany",{"2":{"182":2}}],["genai",{"2":{"82":1,"180":1}}],["gensym",{"2":{"64":2,"182":3}}],["genie",{"2":{"58":1}}],["general",{"0":{"148":1},"1":{"149":1,"150":1},"2":{"11":1,"24":1,"26":1,"106":1,"108":1,"114":1,"120":1,"153":2,"167":2,"168":1,"169":1,"175":3,"183":1}}],["generally",{"2":{"7":1,"108":1}}],["generator",{"2":{"61":1,"62":1,"64":24,"182":25}}],["generating",{"2":{"11":1,"51":1,"64":11,"106":1,"177":1,"181":2,"182":20,"183":18}}],["generativeai",{"2":{"48":1}}],["generative",{"2":{"1":1,"57":1,"65":1,"66":1}}],["generation",{"0":{"1":1,"33":1,"38":1},"1":{"2":1,"34":1,"35":1,"36":1,"39":1,"40":1,"41":1,"42":1},"2":{"1":1,"6":1,"54":1,"56":1,"60":2,"63":3,"64":3,"108":1,"120":1,"180":1,"181":2,"182":7,"183":25}}],["generated",{"2":{"6":1,"8":1,"9":1,"11":5,"19":1,"47":1,"52":1,"55":2,"57":1,"58":1,"64":5,"66":1,"67":1,"71":1,"76":1,"106":2,"120":2,"181":2,"182":10,"183":30}}],["generates",{"2":{"2":1,"11":1,"22":1,"63":3,"64":1,"106":1,"124":1,"125":1,"182":4,"183":5}}],["generate",{"0":{"4":1},"2":{"0":1,"2":1,"3":1,"7":1,"8":1,"11":4,"52":3,"55":1,"56":1,"57":3,"58":5,"60":2,"61":3,"63":2,"64":10,"66":2,"69":1,"93":2,"94":1,"96":1,"106":2,"108":3,"120":1,"124":1,"126":1,"141":1,"142":1,"150":1,"177":4,"181":3,"182":18,"183":34}}],["genericwriter",{"0":{"162":1}}],["generictopicexpertask",{"0":{"161":1}}],["generictranscriptcritic",{"0":{"141":1}}],["generic",{"2":{"4":1,"105":1,"120":1,"141":1,"157":1,"161":1,"162":1}}],["gestures",{"2":{"41":1}}],["getpropertynested",{"2":{"64":2,"182":5,"183":1}}],["getindex",{"2":{"55":1,"181":1}}],["getting",{"0":{"68":1,"78":2,"79":1,"80":1},"1":{"69":1,"70":1,"71":1,"72":1},"2":{"23":1,"152":1}}],["get",{"2":{"11":1,"22":1,"32":1,"46":1,"50":2,"54":2,"55":3,"56":1,"58":1,"61":5,"63":5,"64":11,"69":1,"71":1,"74":1,"77":2,"78":2,"83":1,"84":1,"85":1,"89":1,"90":1,"92":1,"93":4,"106":1,"107":1,"108":2,"179":2,"181":4,"182":60,"183":83}}],["gemini",{"2":{"0":1,"32":3,"33":2,"34":3,"35":1,"36":1,"183":11}}],["tf",{"2":{"182":1}}],["td",{"2":{"182":1}}],["tp",{"2":{"182":1}}],["tpl=pt",{"2":{"94":1,"183":2}}],["tpl",{"2":{"9":1,"94":2,"97":2,"183":2}}],["tsang",{"2":{"181":1}}],["tldr",{"2":{"164":1,"177":6}}],["tl",{"2":{"39":1}}],["tmixtral",{"2":{"30":2,"108":2}}],["tmps",{"2":{"14":1,"183":2}}],["tmp",{"2":{"9":1}}],["typically",{"2":{"183":3}}],["typing",{"2":{"21":1,"183":2}}],["typed",{"0":{"93":1},"2":{"70":1,"93":2}}],["type=fruit",{"2":{"183":1}}],["type=food",{"2":{"11":1,"31":1,"106":1,"108":1}}],["type=mymeasurement",{"2":{"183":5}}],["type=maybetags",{"2":{"182":1}}],["type=manymeasurements",{"2":{"20":1,"183":2}}],["type=currentweather",{"2":{"20":1}}],["types",{"2":{"13":1,"55":2,"60":2,"61":4,"63":1,"64":7,"87":1,"90":1,"93":4,"104":1,"106":1,"108":4,"168":2,"169":2,"181":2,"182":8,"183":16}}],["type",{"2":{"6":1,"11":1,"12":1,"16":1,"20":2,"55":3,"57":1,"58":1,"60":2,"62":2,"63":1,"64":3,"71":1,"93":2,"98":1,"100":1,"106":1,"108":21,"130":1,"168":1,"169":1,"181":8,"182":81,"183":153}}],["tiktokenizer",{"2":{"183":1}}],["titles",{"2":{"152":2,"153":2}}],["title",{"2":{"1
52":2,"153":1,"158":1,"164":1,"177":2}}],["tiniest",{"2":{"132":1}}],["tinyrag",{"2":{"182":2}}],["tiny",{"2":{"24":3,"26":3,"182":1,"183":2}}],["tier",{"0":{"83":1},"2":{"79":3,"83":1,"183":2}}],["timing",{"2":{"55":1,"181":1}}],["timed",{"2":{"55":1,"181":1}}],["timeout",{"2":{"55":3,"183":8}}],["timestamp",{"2":{"152":2,"153":3,"183":4}}],["timestamps",{"2":{"152":3,"153":2}}],["times",{"2":{"22":1,"52":2,"54":1,"55":3,"94":1,"181":4}}],["time",{"2":{"3":1,"7":1,"9":3,"11":1,"14":1,"22":1,"31":1,"36":1,"54":1,"55":3,"67":1,"76":1,"82":2,"84":1,"90":1,"96":1,"100":1,"103":1,"105":1,"106":1,"180":1,"181":4,"182":3,"183":29}}],["tired",{"2":{"20":1}}],["tips",{"2":{"22":1,"67":1,"152":2,"153":2,"183":2}}],["tip",{"2":{"15":1,"67":1,"71":1,"72":2,"77":1,"115":1,"183":1}}],["trove",{"2":{"94":2,"183":2}}],["troubleshooting",{"2":{"37":1}}],["treated",{"2":{"183":2}}],["treasure",{"2":{"94":2,"183":2}}],["trees",{"2":{"55":1,"181":3}}],["tree",{"2":{"19":2,"22":2,"52":1,"55":7,"64":2,"67":1,"93":1,"181":17,"182":4,"183":7}}],["traced",{"2":{"183":2}}],["tracemessage",{"2":{"183":1}}],["trace",{"2":{"183":2}}],["tracers",{"2":{"183":3}}],["tracerschema",{"2":{"98":7,"183":17}}],["tracer",{"2":{"183":102}}],["tracermessagelike",{"2":{"183":2}}],["tracermessage",{"2":{"98":2,"183":10}}],["tracing",{"0":{"98":1},"2":{"98":2,"183":16}}],["tracked",{"2":{"182":1,"183":1}}],["tracker",{"2":{"64":9,"182":28,"183":1}}],["tracking",{"2":{"181":1,"183":2}}],["tracks",{"2":{"64":1,"182":1,"183":1}}],["track",{"2":{"64":5,"181":1,"182":16,"183":1}}],["trained",{"2":{"152":1,"153":1,"158":1}}],["train",{"2":{"76":1}}],["training",{"2":{"76":1,"183":1}}],["trailing",{"2":{"183":2}}],["trail",{"2":{"67":1,"183":1}}],["transcripts",{"2":{"152":2,"153":2}}],["transcript",{"2":{"140":5,"141":6,"142":4,"152":7,"153":6,"158":6}}],["transcribe",{"2":{"21":2,"178":2,"183":8}}],["transformation",{"2":{"183":1}}],["transformations",{"0":{"123":1},"1":{"124":1,"125":1,"126":1,"127":1,"128":1},"2":{"125":1}}],["transform",{"2":{"64":1,"182":2,"183":1}}],["translates",{"2":{"183":2}}],["translate",{"2":{"15":1,"182":6,"183":2}}],["truncation",{"2":{"182":2}}],["truncated",{"2":{"182":1,"183":4}}],["truncates",{"2":{"181":1}}],["truncate",{"2":{"181":2,"182":17,"183":2}}],["truths",{"2":{"67":1,"183":1}}],["trusted",{"2":{"58":1}}],["true",{"2":{"4":1,"6":2,"7":1,"13":2,"18":3,"22":1,"35":1,"45":1,"50":1,"54":1,"55":32,"64":17,"93":4,"94":2,"97":2,"137":2,"179":1,"181":25,"182":58,"183":135}}],["tryparse",{"2":{"55":4,"93":2,"181":4,"183":1}}],["try",{"0":{"83":1},"2":{"19":1,"31":1,"41":1,"55":2,"64":1,"94":1,"108":4,"110":1,"117":1,"118":1,"124":1,"125":1,"181":1,"182":4,"183":9}}],["trying",{"2":{"13":1,"35":1,"41":1,"52":1,"181":1,"183":5}}],["trims",{"2":{"183":1}}],["trial",{"2":{"182":1}}],["tries",{"2":{"181":1,"182":3,"183":2}}],["triple",{"2":{"130":1,"131":1,"154":1,"183":1}}],["trivially",{"2":{"93":1}}],["trigram",{"2":{"64":3,"182":8,"183":1}}],["trigrams",{"2":{"64":6,"66":4,"182":23,"183":2}}],["trigramannotater",{"2":{"64":4,"182":10,"183":1}}],["triggers",{"2":{"55":1,"108":1,"181":2}}],["triggered",{"2":{"52":1,"55":1,"181":1}}],["trigger",{"2":{"11":1,"52":1,"55":2,"106":1,"181":3,"183":2}}],["tricks",{"2":{"83":2}}],["trick",{"2":{"11":1,"19":1,"41":1,"106":1,"183":5}}],["tell",{"2":{"171":1}}],["tedious",{"2":{"90":1}}],["tens",{"2":{"82":1}}],["tenth",{"2":{"80":1}}],["tends",{"2":{"95":1}}],["tend",{"2":{"11":1,"67":1,"75":1,"90":1,"106":1,"183":1}}],["terms",{"2":{"114":1,"126":2}}],["term",{"2":{
"56":1,"114":1,"182":1}}],["terminal",{"2":{"28":1,"37":1,"58":1,"66":1,"69":2,"84":4,"89":1}}],["testing",{"2":{"183":5}}],["testechoopenaischema",{"2":{"183":2}}],["testechoollamaschema",{"2":{"183":2}}],["testechoollamamanagedschema",{"2":{"183":2}}],["testechogoogleschema",{"2":{"183":2}}],["testechoanthropicschema",{"2":{"183":2}}],["test`",{"2":{"130":1,"167":3,"175":3}}],["tests>",{"2":{"175":2}}],["testset`",{"2":{"167":1,"175":1}}],["testsets",{"2":{"167":1,"175":1}}],["testset",{"2":{"55":1,"130":1,"167":2,"175":2,"183":1}}],["tests",{"2":{"55":4,"130":1,"167":4,"175":4,"183":5}}],["test",{"2":{"42":1,"55":2,"64":5,"167":14,"171":1,"175":14,"182":11,"183":9}}],["teacher",{"2":{"120":1}}],["teach",{"2":{"41":1}}],["technical",{"2":{"114":2,"126":1}}],["technically",{"2":{"28":1}}],["technique",{"2":{"96":1}}],["techniques",{"2":{"58":1}}],["technology",{"2":{"41":1,"158":1}}],["tempdir",{"2":{"183":1}}],["temporary",{"2":{"183":1}}],["temperature=>float64",{"2":{"183":1}}],["temperature=0",{"2":{"11":1,"55":3,"98":1,"106":3,"108":2,"181":3,"183":3}}],["temperature",{"2":{"20":1,"181":2,"183":20}}],["temperatureunits",{"2":{"20":2}}],["templating",{"2":{"14":1,"72":1,"183":1}}],["template",{"0":{"94":1,"110":1,"112":1,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"130":1,"131":1,"132":1,"134":1,"136":1,"137":1,"138":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"149":1,"150":1,"152":1,"153":1,"154":1,"157":1,"158":1,"159":1,"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"171":1,"173":1,"174":1,"175":1,"177":1,"178":1},"2":{"9":10,"14":5,"18":1,"21":1,"55":1,"60":2,"62":5,"64":17,"94":13,"97":1,"98":1,"105":7,"107":4,"126":1,"130":1,"131":1,"132":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"149":1,"152":1,"153":1,"154":1,"159":1,"160":1,"168":1,"169":1,"181":9,"182":47,"183":129}}],["templates=true",{"2":{"183":1}}],["templates",{"0":{"105":1,"109":1,"111":1,"113":1,"116":1,"119":1,"123":1,"129":1,"133":1,"135":1,"139":1,"143":1,"146":1,"148":1,"151":1,"172":1,"176":1},"1":{"110":1,"112":1,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"130":1,"131":1,"132":1,"134":1,"136":1,"137":1,"138":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"149":1,"150":1,"152":1,"153":1,"154":1,"173":1,"174":1,"175":1,"177":1,"178":1},"2":{"9":13,"11":1,"14":2,"18":1,"72":1,"94":12,"97":2,"98":1,"100":1,"105":3,"106":1,"181":4,"183":48}}],["templated",{"0":{"14":1},"2":{"9":1,"18":1}}],["text=",{"2":{"182":1}}],["text1",{"2":{"67":1,"183":1}}],["text2",{"2":{"67":2,"183":2}}],["texts",{"2":{"64":3,"67":2,"140":1,"182":6,"183":2}}],["textchunker",{"2":{"64":1,"182":7,"183":1}}],["textanalysis",{"2":{"8":1}}],["text",{"0":{"33":1,"38":1,"65":1},"1":{"34":1,"35":1,"36":1,"39":1,"40":1,"41":1,"42":1,"66":1,"67":1},"2":{"2":3,"8":1,"11":6,"17":1,"20":2,"21":1,"30":1,"31":2,"55":3,"58":1,"64":9,"65":2,"66":7,"67":41,"95":2,"104":1,"106":4,"108":1,"114":7,"115":6,"140":8,"160":4,"162":1,"164":1,"177":2,"178":1,"181":1,"182":36,"183":84}}],["tuning",{"2":{"96":1,"183":1}}],["tune",{"0":{"96":1}}],["tuned",{"2":{"31":1}}],["tuple",{"2":{"45":1,"181":1,"182":5,"183":39}}],["tuples",{"2":{"19":1,"168":1,"169":1,"183":5}}],["turn",{"0":{"92":1},"2":{"35":2,"42":1,"72":1,"183":4}}],["turbo",{"2":{"7":1,"16":3,"71":2,"98":1,"107":1,"108":2,"183":11}}],["tutorials",{"2":{"58":2}}],["tutorial",{"2":{"8":1,"69":1,"70":1,"77":1}}],["t",{"2":{"6":1,"8":1,"9":1,"27":1,"28":
1,"30":1,"37":1,"39":1,"55":2,"90":1,"103":1,"108":2,"110":3,"114":1,"115":1,"117":5,"118":3,"120":1,"152":2,"153":1,"154":1,"167":1,"169":1,"171":1,"175":1,"181":5,"182":23,"183":37}}],["tweak",{"2":{"2":2,"7":1,"55":2,"181":2}}],["two",{"0":{"2":1},"2":{"2":1,"5":4,"6":1,"7":7,"11":1,"17":1,"18":3,"22":1,"54":2,"55":2,"66":2,"67":2,"79":1,"93":5,"97":1,"107":1,"108":1,"130":1,"181":4,"182":6,"183":19}}],["taking",{"2":{"126":1,"128":1}}],["taken",{"2":{"183":2}}],["takes",{"2":{"79":1,"183":1}}],["take",{"2":{"7":1,"9":2,"102":1,"107":1,"131":1,"132":1,"152":1,"156":1,"174":1,"181":1,"183":1}}],["tapestry",{"2":{"67":2,"183":2}}],["target",{"2":{"64":2,"140":1,"162":1,"164":2,"171":2,"182":14}}],["tavilysearchrefiner",{"2":{"182":6,"183":1}}],["tavily",{"2":{"49":3,"50":1,"179":5,"180":1,"182":3,"183":5}}],["tall",{"2":{"20":2,"183":11}}],["tabular",{"2":{"14":1,"183":2}}],["table",{"2":{"7":2,"9":1,"82":1,"171":4}}],["tables",{"2":{"7":1}}],["task>",{"2":{"174":4}}],["tasked",{"2":{"126":1,"128":1}}],["task=",{"2":{"21":1,"183":2}}],["tasks",{"2":{"15":2,"16":1,"18":1,"20":1,"24":1,"26":1,"30":1,"34":1,"42":1,"46":2,"58":1,"64":2,"72":1,"74":1,"136":1,"138":1,"154":1,"159":1,"181":1,"182":8,"183":3}}],["task",{"0":{"151":1},"1":{"152":1,"153":1,"154":1},"2":{"9":1,"12":1,"20":1,"58":1,"96":1,"98":1,"104":1,"105":1,"108":3,"117":1,"118":1,"121":1,"124":1,"125":1,"126":2,"127":1,"128":1,"130":1,"131":1,"136":1,"138":1,"140":1,"141":1,"142":1,"149":1,"150":6,"158":1,"159":4,"166":7,"167":1,"168":6,"169":6,"171":5,"174":6,"175":1,"177":1,"178":4,"183":6}}],["tag2",{"2":{"64":1,"182":1}}],["tag1",{"2":{"64":1,"182":1}}],["tagging",{"2":{"64":2,"182":4}}],["tagger=opentagger",{"2":{"64":1,"182":1}}],["tagger",{"2":{"64":27,"182":43}}],["tag",{"2":{"2":1,"58":1,"63":1,"64":4,"182":30}}],["tags",{"2":{"2":3,"61":4,"63":8,"64":17,"144":2,"145":1,"174":3,"175":2,"182":108,"183":9}}],["tailored",{"2":{"1":1,"140":1,"150":1}}],["txtouterjoin",{"2":{"7":1}}],["txtrightjoin",{"2":{"7":1}}],["txtleftjoin",{"2":{"7":1}}],["txtinnerjoin",{"2":{"7":1}}],["txtin",{"2":{"7":1}}],["txtwe",{"2":{"7":1}}],["txtjulia",{"2":{"7":3}}],["txtdatabase",{"2":{"7":1}}],["txt",{"2":{"2":2,"5":1,"6":1}}],["tokyo",{"2":{"183":12}}],["tokenizer",{"2":{"183":1}}],["tokenizers",{"2":{"182":1}}],["tokenizes",{"2":{"182":2}}],["tokenized",{"2":{"182":2}}],["tokenize",{"2":{"66":2,"182":2,"183":1}}],["tokens=",{"2":{"183":1}}],["tokens=2500",{"2":{"21":1,"183":2}}],["tokens",{"2":{"12":1,"21":2,"23":1,"24":1,"26":1,"30":1,"31":1,"36":1,"64":2,"71":2,"72":1,"79":4,"82":1,"92":1,"182":21,"183":70}}],["token",{"2":{"11":1,"19":1,"29":1,"64":1,"82":1,"106":1,"181":1,"182":11,"183":64}}],["toml",{"2":{"85":1,"183":2}}],["touches",{"2":{"63":1}}],["total",{"2":{"58":1,"64":6,"181":1,"182":14,"183":1}}],["toy",{"2":{"55":1,"108":1,"181":1}}],["took",{"2":{"55":1,"108":1,"181":1,"182":1}}],["too",{"0":{"80":1},"2":{"17":1,"47":1,"55":1,"67":1,"108":1,"120":1,"152":2,"168":1,"169":1,"181":2,"183":5}}],["toolmessage",{"2":{"183":2}}],["tool",{"2":{"13":1,"22":1,"23":1,"106":1,"108":4,"144":4,"183":130}}],["tools",{"0":{"51":1,"56":1},"1":{"52":1,"53":1,"54":1,"55":1,"57":1,"58":1,"59":1,"60":1,"61":1,"62":1,"63":1,"64":1},"2":{"11":1,"57":1,"58":1,"67":2,"108":2,"183":34}}],["tone",{"2":{"9":1,"140":2,"160":1}}],["today",{"2":{"7":1,"23":1,"24":1,"26":1,"30":1,"40":1,"42":1,"92":1,"183":8}}],["topics",{"2":{"24":1,"26":1,"30":1,"31":1,"160":1}}],["topic",{"2":{"5":2,"6":1,"7":2,"158":6,"161":5,"164":5}}],["top",{"2":{"2":1,"6":2,"7":12,
"55":1,"57":1,"58":1,"60":1,"62":4,"64":22,"90":1,"171":1,"181":1,"182":46,"183":6}}],["to",{"0":{"19":1,"83":1,"92":1,"93":1,"94":1,"97":1},"2":{"0":1,"1":3,"2":11,"3":1,"4":2,"5":4,"6":5,"7":17,"8":2,"9":20,"11":21,"12":13,"13":9,"14":6,"15":2,"16":4,"17":4,"18":3,"19":2,"20":6,"22":11,"23":1,"24":6,"25":1,"26":5,"27":1,"28":6,"29":7,"30":3,"31":6,"32":1,"34":5,"35":6,"36":1,"37":7,"39":3,"40":2,"41":2,"42":6,"43":1,"46":1,"47":1,"49":3,"50":9,"51":1,"52":16,"54":11,"55":100,"56":6,"57":9,"58":20,"60":9,"61":1,"62":4,"63":14,"64":159,"65":1,"66":14,"67":43,"69":9,"70":1,"71":4,"72":2,"74":1,"75":1,"76":7,"77":2,"78":2,"79":4,"80":3,"81":4,"82":2,"83":6,"84":5,"85":1,"87":2,"88":3,"89":3,"90":3,"91":6,"92":8,"93":18,"94":17,"95":2,"96":4,"97":11,"98":6,"99":3,"100":3,"101":1,"102":9,"103":6,"105":6,"106":21,"107":7,"108":31,"110":2,"112":4,"114":3,"117":8,"118":8,"120":4,"121":5,"122":2,"124":4,"125":3,"126":3,"127":6,"128":1,"130":10,"131":5,"132":2,"136":2,"138":5,"140":10,"141":5,"142":7,"144":2,"145":1,"147":1,"150":2,"152":14,"153":10,"154":6,"158":1,"160":1,"161":1,"162":2,"163":1,"164":5,"166":1,"167":4,"168":2,"169":2,"170":1,"171":5,"174":1,"175":4,"177":2,"179":10,"180":3,"181":146,"182":431,"183":843}}],["together",{"0":{"30":1},"2":{"0":1,"2":1,"5":3,"6":1,"7":3,"20":1,"30":3,"36":1,"64":1,"75":1,"98":1,"99":1,"108":6,"114":1,"182":2,"183":7}}],["togetheropenaischema",{"2":{"0":1,"30":2,"108":1,"183":2}}],["thomsonsampling",{"2":{"181":1}}],["thompson",{"2":{"181":3}}],["thompsonsampling",{"2":{"181":6,"183":1}}],["thoroughly",{"2":{"120":1}}],["thought",{"2":{"144":1,"166":1,"168":1,"174":1,"183":2}}],["though",{"2":{"108":1}}],["those",{"2":{"58":1,"64":1,"71":1,"144":1,"145":1,"147":1,"153":1,"182":1,"183":2}}],["than",{"2":{"28":1,"64":2,"67":2,"72":2,"108":1,"130":1,"160":1,"182":8,"183":8}}],["thanks",{"2":{"105":1,"181":2}}],["thank",{"2":{"13":1}}],["that",{"2":{"0":2,"3":1,"5":1,"6":3,"7":15,"9":6,"11":2,"14":2,"16":3,"17":1,"19":1,"20":1,"22":4,"23":3,"24":3,"26":2,"32":1,"36":2,"37":2,"42":2,"49":1,"51":1,"52":5,"54":4,"55":22,"56":2,"58":1,"60":3,"62":3,"63":1,"64":23,"67":6,"69":1,"70":1,"74":1,"77":1,"79":4,"80":1,"89":3,"92":2,"93":1,"94":1,"96":1,"97":2,"98":1,"99":1,"100":3,"101":2,"102":1,"103":2,"105":4,"106":3,"107":2,"108":10,"110":1,"112":1,"117":2,"118":2,"120":2,"122":1,"124":3,"125":2,"126":5,"128":1,"130":1,"136":2,"138":3,"140":1,"141":1,"142":3,"150":2,"152":3,"153":1,"154":2,"158":1,"160":1,"162":1,"164":2,"166":1,"168":2,"169":2,"171":2,"174":1,"177":1,"180":1,"181":25,"182":68,"183":103}}],["third",{"2":{"22":1,"54":1,"55":1,"181":1}}],["think",{"2":{"101":1,"108":1,"130":1,"131":2,"132":1,"162":1,"164":1,"166":1,"167":1,"174":1,"175":1,"182":1,"183":2}}],["thinking",{"2":{"22":2,"54":2,"55":3,"181":3}}],["things",{"2":{"13":1,"35":1}}],["this",{"0":{"6":1},"2":{"0":2,"1":2,"2":1,"3":1,"4":1,"6":2,"7":2,"8":1,"9":3,"11":1,"12":2,"14":1,"18":1,"22":2,"32":1,"35":1,"37":1,"41":2,"47":1,"49":2,"52":2,"55":10,"58":1,"60":1,"62":2,"64":6,"65":1,"67":4,"69":1,"70":1,"76":1,"78":2,"79":1,"80":2,"83":1,"84":1,"94":1,"98":1,"99":2,"104":1,"106":1,"107":2,"108":2,"124":1,"126":4,"127":1,"128":1,"130":1,"131":1,"132":1,"142":1,"152":2,"153":2,"154":3,"168":2,"169":2,"171":2,"180":2,"181":15,"182":32,"183":81}}],["throw==false",{"2":{"55":1,"181":1}}],["throw=true",{"2":{"55":2,"181":2}}],["thrown",{"2":{"55":1,"78":1,"181":1}}],["throw",{"2":{"55":4,"108":1,"181":4,"183":6}}],["throughout",{"2":{"70":1}}],["through",{"2":{"0":1,"7":1,"12":1,"35":1,"55":1,"67
":1,"99":1,"114":1,"166":1,"167":1,"174":1,"175":1,"181":3,"182":2,"183":5}}],["thread",{"2":{"92":1,"98":2,"183":22}}],["threads`",{"2":{"58":1}}],["threads",{"2":{"46":1,"64":7,"182":28,"183":1}}],["threshold",{"2":{"64":1,"182":4}}],["three",{"2":{"13":1,"18":1,"60":1,"66":1,"90":1,"93":1,"140":2,"141":2,"142":2,"182":3,"183":4}}],["then",{"2":{"11":2,"12":1,"13":1,"20":1,"28":1,"37":1,"47":1,"55":2,"60":1,"63":1,"64":4,"67":1,"92":1,"93":2,"94":2,"98":1,"99":1,"106":1,"107":2,"181":1,"182":13,"183":14}}],["theory",{"2":{"7":1,"171":1}}],["their",{"2":{"7":2,"11":1,"24":1,"26":1,"31":1,"55":1,"58":2,"64":3,"67":1,"76":1,"102":1,"106":1,"112":3,"115":1,"136":1,"138":1,"140":1,"141":1,"181":2,"182":12,"183":14}}],["there",{"2":{"7":2,"9":1,"20":1,"24":3,"26":2,"27":1,"30":1,"31":1,"34":1,"37":1,"39":2,"40":1,"41":1,"42":2,"49":1,"55":5,"63":1,"64":2,"66":1,"67":3,"74":1,"75":1,"78":1,"83":1,"89":1,"90":1,"97":2,"101":1,"102":1,"104":1,"105":1,"107":2,"130":2,"131":1,"160":1,"181":5,"182":4,"183":18}}],["themselves",{"2":{"183":1}}],["themed",{"2":{"158":1}}],["theme",{"0":{"155":2,"156":2},"1":{"157":2,"158":2,"159":2,"160":2,"161":2,"162":2,"163":2,"164":2,"165":2,"166":2,"167":2,"168":2,"169":2,"170":2,"171":2,"172":2,"173":2,"174":2,"175":2},"2":{"154":6,"158":7}}],["themes",{"2":{"154":5,"158":1,"177":1}}],["them",{"2":{"2":4,"3":1,"6":1,"7":1,"8":1,"9":6,"11":1,"13":1,"17":1,"22":1,"24":2,"26":1,"27":1,"36":1,"37":1,"55":4,"58":1,"60":1,"62":1,"64":5,"66":1,"67":3,"82":1,"83":1,"101":1,"105":1,"106":2,"108":1,"130":1,"152":3,"167":1,"168":1,"169":1,"171":1,"175":1,"181":4,"182":6,"183":25}}],["they",{"2":{"1":1,"9":2,"11":1,"21":1,"22":2,"24":2,"26":1,"27":1,"55":4,"64":1,"66":1,"103":1,"106":2,"108":1,"141":1,"152":1,"153":1,"156":2,"171":2,"181":4,"182":3,"183":9}}],["these",{"2":{"0":1,"11":1,"16":1,"37":1,"55":1,"58":3,"64":2,"67":1,"79":1,"83":1,"106":1,"114":1,"120":1,"121":1,"140":1,"142":2,"144":1,"145":1,"147":1,"153":3,"162":1,"164":1,"181":1,"182":3,"183":5}}],["the",{"0":{"7":1,"80":1,"84":1,"85":1,"86":1,"90":1,"97":1},"2":{"0":13,"1":4,"2":24,"3":3,"4":2,"5":7,"6":9,"7":91,"8":6,"9":49,"11":39,"12":11,"13":8,"14":12,"15":2,"16":7,"17":5,"18":5,"19":6,"20":10,"21":7,"22":22,"23":5,"24":16,"25":1,"26":9,"27":8,"28":12,"29":6,"30":4,"31":9,"32":3,"33":2,"34":3,"35":5,"36":5,"37":4,"39":1,"41":6,"42":3,"43":3,"45":1,"46":1,"48":1,"50":15,"51":1,"52":33,"54":21,"55":194,"56":9,"57":13,"58":48,"60":34,"61":2,"62":6,"63":29,"64":247,"65":1,"66":14,"67":98,"69":6,"70":4,"71":14,"72":8,"74":3,"75":3,"76":5,"77":3,"78":15,"79":11,"80":3,"81":1,"82":4,"83":8,"84":7,"85":2,"87":1,"88":2,"89":3,"90":4,"91":9,"92":19,"93":18,"94":20,"95":3,"96":4,"97":19,"98":14,"99":5,"100":9,"101":4,"102":9,"103":5,"104":13,"105":9,"106":37,"107":35,"108":75,"110":6,"112":10,"114":10,"115":3,"117":26,"118":32,"120":17,"121":20,"122":5,"124":6,"125":6,"126":11,"127":7,"128":4,"130":31,"131":12,"132":2,"136":6,"137":2,"138":13,"140":21,"141":19,"142":26,"144":6,"145":4,"147":3,"149":1,"150":4,"152":14,"153":20,"154":17,"156":1,"157":1,"158":8,"160":15,"161":2,"162":7,"163":1,"164":9,"165":3,"166":5,"167":12,"168":6,"169":7,"170":1,"171":22,"173":4,"174":6,"175":13,"177":9,"178":2,"179":16,"180":3,"181":339,"182":841,"183":1653}}],["fn",{"2":{"183":2}}],["f1",{"2":{"182":2}}],["ffs",{"2":{"130":1,"131":1}}],["f2",{"2":{"55":2,"181":2,"182":2}}],["fmixtral",{"2":{"31":2}}],["f",{"2":{"22":2,"31":1,"54":2,"55":11,"108":1,"168":1,"169":1,"181":16,"183":7}}],["fences",{"2":{"183":2}}],["fence",{"2":{"130":
1,"131":1,"183":2}}],["fear",{"2":{"41":1}}],["features",{"2":{"82":1,"142":1,"171":4,"183":2}}],["feature",{"2":{"18":1,"22":1,"171":7,"183":1}}],["february",{"2":{"183":1}}],["feb",{"2":{"31":1,"36":1}}],["feedbackfromevaluator",{"0":{"134":1},"2":{"181":3}}],["feedback",{"0":{"133":1},"1":{"134":1},"2":{"22":6,"52":6,"54":8,"55":55,"93":2,"108":5,"130":9,"131":4,"132":4,"134":5,"181":125,"183":4}}],["feel",{"2":{"24":1,"26":1,"31":1,"34":1,"42":1,"57":1,"66":1,"183":1}}],["feels",{"2":{"13":1}}],["feelings",{"2":{"13":1,"35":2,"41":2,"183":8}}],["fewer",{"2":{"182":1}}],["few",{"2":{"2":2,"6":1,"12":1,"91":1,"130":1,"152":1,"160":1,"181":2,"183":6}}],["flexibility",{"2":{"181":1}}],["flexible",{"2":{"55":2,"56":1,"181":2,"183":1}}],["fleming",{"2":{"120":3}}],["flowed",{"2":{"67":1,"183":1}}],["flow",{"2":{"61":4,"64":2,"93":1,"182":3,"183":5}}],["flows",{"2":{"35":1}}],["float",{"2":{"182":5,"183":1}}],["float32",{"2":{"58":4,"182":5}}],["float64",{"2":{"17":1,"20":1,"23":2,"45":2,"46":2,"47":2,"64":13,"181":1,"182":35,"183":26}}],["float64int64float64dict",{"2":{"7":1}}],["flashrank",{"2":{"182":4}}],["flashranker",{"2":{"182":3,"183":1}}],["flag",{"2":{"93":1,"181":2,"182":10,"183":3}}],["flags",{"2":{"16":1,"71":1}}],["flavors",{"2":{"11":1,"52":1,"106":1,"183":2}}],["flavor",{"2":{"0":1,"182":1,"183":21}}],["fruit",{"2":{"183":2}}],["friendly",{"2":{"30":1,"160":2}}],["francisco",{"2":{"20":1}}],["france",{"2":{"16":1,"64":2,"71":3,"107":4,"182":7}}],["frameworks",{"2":{"58":1}}],["frame",{"2":{"7":8}}],["frames",{"2":{"6":1,"7":7}}],["frequencies",{"2":{"182":1}}],["frequently",{"0":{"73":1},"1":{"74":1,"75":1,"76":1,"77":1,"78":1,"79":1,"80":1,"81":1,"82":1,"83":1,"84":1,"85":1,"86":1,"87":1,"88":1,"89":1,"90":1,"91":1,"92":1,"93":1,"94":1,"95":1,"96":1,"97":1,"98":1},"2":{"37":1,"183":1}}],["free",{"2":{"24":1,"26":1,"31":2,"34":1,"36":1,"42":1,"57":1,"66":1,"75":1,"79":2,"80":1,"82":1,"182":1,"183":5}}],["freedom",{"2":{"13":1}}],["french",{"2":{"15":1,"98":1,"183":3}}],["from",{"0":{"79":1,"87":1},"2":{"2":5,"3":1,"6":2,"7":13,"8":1,"11":6,"12":2,"13":2,"20":2,"21":1,"24":1,"25":1,"26":1,"31":1,"32":1,"35":1,"37":2,"41":1,"50":2,"52":1,"54":1,"55":15,"56":1,"57":4,"58":2,"60":2,"61":1,"63":2,"64":17,"66":2,"67":2,"69":1,"71":1,"77":1,"83":1,"84":1,"87":2,"92":1,"93":1,"104":3,"106":4,"107":1,"108":6,"114":1,"115":2,"117":1,"118":4,"120":2,"121":3,"122":1,"124":1,"125":1,"126":1,"127":1,"128":2,"130":2,"134":2,"136":1,"138":1,"142":1,"150":1,"152":1,"153":1,"154":3,"160":1,"168":1,"169":1,"171":2,"178":1,"179":2,"181":29,"182":57,"183":121}}],["fairly",{"2":{"183":1}}],["fail",{"2":{"55":1,"181":3,"183":4}}],["failure",{"2":{"22":1,"52":3,"54":1,"55":1,"181":3}}],["failures",{"2":{"22":1,"54":1,"55":2,"181":1,"183":3}}],["fails",{"2":{"22":3,"52":1,"54":3,"55":2,"181":3,"183":8}}],["failedresponse",{"2":{"93":3}}],["failed",{"2":{"7":2,"20":1,"55":1,"93":2,"181":1,"182":1,"183":3}}],["favors",{"2":{"181":1}}],["favorite",{"2":{"89":1,"98":1}}],["far",{"2":{"181":2}}],["famous",{"2":{"164":1}}],["familiar",{"2":{"1":1}}],["faq",{"2":{"69":1,"76":1,"105":1}}],["fallback",{"2":{"183":7}}],["falls",{"2":{"55":1,"181":1}}],["fall",{"2":{"55":3,"181":3}}],["false`",{"2":{"55":1,"181":1}}],["false",{"2":{"2":1,"7":2,"18":2,"22":1,"50":3,"54":1,"55":13,"64":2,"137":2,"179":3,"181":14,"182":16,"183":77}}],["fahrenheit",{"2":{"20":1,"183":9}}],["faster",{"2":{"22":1,"28":1,"46":1,"54":1,"55":1,"64":1,"181":1,"182":1}}],["fast",{"2":{"19":1,"28":1,"182":1,"183":1}}],["face",{"2":{"42":1}}]
,["facilitating",{"2":{"183":2}}],["facilitate",{"2":{"11":1,"52":1,"55":1,"63":1,"106":1,"181":1,"183":2}}],["facing",{"2":{"16":1}}],["facts",{"2":{"169":1}}],["fact",{"2":{"11":1,"18":1,"52":1,"106":1}}],["focused",{"2":{"130":1,"153":1,"164":1}}],["focus",{"2":{"126":1,"160":1,"171":1,"181":1,"183":2}}],["focusing",{"2":{"58":1,"140":1}}],["four",{"2":{"18":1,"93":2,"183":2}}],["foundation",{"0":{"29":1},"2":{"29":1,"183":3}}],["found",{"2":{"7":1,"55":2,"64":2,"87":1,"181":1,"182":6,"183":22}}],["food",{"2":{"11":1,"31":5,"106":1,"108":24}}],["footers",{"2":{"2":1}}],["follow",{"2":{"91":1,"92":1,"130":1,"131":1,"140":2,"141":2,"142":2,"152":2,"154":1,"156":1,"160":4,"166":2,"171":1,"174":1,"183":1}}],["followed",{"2":{"9":1,"64":1,"182":1}}],["follows",{"2":{"7":1,"48":1,"51":1,"56":1,"117":1,"130":1,"182":3,"183":1}}],["following",{"2":{"5":1,"7":2,"9":1,"12":1,"18":1,"55":1,"58":2,"66":1,"70":1,"87":1,"95":2,"100":1,"117":1,"118":1,"131":1,"160":1,"181":2,"182":1,"183":11}}],["folder",{"2":{"2":1,"12":3,"14":1,"94":4,"183":4}}],["forward",{"2":{"183":1}}],["forwarded",{"2":{"64":12,"182":16}}],["forget",{"2":{"182":1,"183":1}}],["forbidden",{"2":{"167":1,"175":1}}],["forum",{"2":{"81":1}}],["forefront",{"2":{"74":1}}],["forever",{"2":{"67":1,"183":1}}],["formulate",{"2":{"120":1}}],["form",{"2":{"76":1,"182":10,"183":1}}],["former",{"2":{"67":1,"183":1}}],["forms",{"2":{"55":1,"181":1,"182":1}}],["format=",{"2":{"183":2}}],["format=dict",{"2":{"108":2}}],["formatting",{"2":{"54":1,"63":1,"97":1,"144":1,"145":1,"147":1,"152":1,"153":1,"154":1,"160":1,"178":1,"182":3,"183":2}}],["formatted",{"0":{"143":1,"172":1},"1":{"144":1,"145":1,"173":1,"174":1,"175":1},"2":{"0":2,"96":1,"97":1,"102":1,"107":1,"108":1,"144":1,"145":1,"154":1,"173":1,"174":1,"175":1,"181":1,"182":1}}],["format",{"2":{"11":1,"22":1,"54":1,"55":2,"63":1,"93":1,"96":2,"102":1,"104":1,"106":1,"107":2,"108":3,"112":1,"115":1,"130":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"150":1,"152":1,"153":1,"154":2,"158":2,"160":3,"162":1,"164":2,"181":2,"182":1,"183":16}}],["forth",{"2":{"41":1}}],["fortunately",{"2":{"6":1,"83":1,"108":1}}],["forces",{"2":{"144":1}}],["force=true",{"2":{"78":1}}],["force",{"2":{"9":1,"13":1,"19":1,"35":2,"41":1,"78":2,"183":3}}],["for",{"0":{"45":1,"82":1,"84":1,"89":1,"107":1,"108":1,"179":1,"180":1,"181":1,"182":1},"2":{"0":6,"2":6,"3":1,"4":2,"5":1,"6":2,"7":14,"8":1,"9":14,"11":9,"13":2,"14":2,"15":2,"16":2,"17":1,"18":3,"19":7,"20":2,"22":7,"23":1,"24":5,"26":2,"27":1,"30":5,"31":8,"32":2,"33":1,"35":2,"36":3,"37":3,"39":1,"41":2,"42":2,"47":1,"48":1,"49":1,"50":3,"51":1,"52":6,"54":5,"55":30,"56":1,"57":3,"58":28,"60":6,"62":3,"63":5,"64":65,"66":2,"67":12,"69":3,"71":3,"72":3,"74":2,"76":2,"77":1,"79":1,"80":1,"82":4,"83":4,"84":3,"86":1,"87":1,"88":1,"89":1,"90":1,"91":3,"93":5,"94":4,"95":3,"96":2,"97":4,"98":1,"99":1,"101":2,"102":2,"105":2,"106":10,"107":7,"108":13,"110":1,"114":2,"115":1,"117":1,"118":2,"120":4,"121":2,"122":1,"124":2,"125":1,"126":5,"127":2,"128":3,"130":3,"131":3,"136":2,"138":3,"140":1,"141":3,"142":2,"144":4,"145":4,"147":3,"149":2,"150":1,"152":2,"153":6,"154":3,"157":1,"158":1,"159":1,"160":8,"161":1,"162":2,"163":1,"164":2,"165":1,"166":1,"167":7,"168":4,"169":4,"170":1,"171":1,"173":2,"174":2,"175":8,"177":2,"179":3,"180":4,"181":65,"182":267,"183":407}}],["five",{"2":{"183":1}}],["fits",{"2":{"99":1,"136":1,"138":2}}],["fit",{"2":{"66":1,"67":2,"171":1,"182":3,"183":2}}],["fixes",{"2":{"181":1}}],["fixed",{"2":{"93":1,"181":1,"183":2}}],["fix",{"2":{
"52":2,"55":1,"78":1,"130":1,"132":1,"181":4,"183":1}}],["fixing",{"0":{"54":1,"129":1},"1":{"130":1,"131":1,"132":1},"2":{"11":1,"51":1,"52":1,"55":4,"106":1,"108":1,"181":21}}],["field3",{"2":{"183":4}}],["field2",{"2":{"183":4}}],["field1",{"2":{"183":8}}],["fieldnames",{"2":{"108":1}}],["field",{"2":{"11":5,"52":1,"55":2,"64":1,"71":1,"106":6,"108":8,"181":3,"182":2,"183":24}}],["fields",{"2":{"9":1,"55":4,"60":1,"91":1,"93":1,"107":1,"108":2,"140":1,"141":1,"142":1,"181":6,"182":21,"183":39}}],["finished",{"2":{"183":2}}],["finish",{"2":{"141":2,"183":5}}],["finance",{"2":{"58":1}}],["finalizes",{"2":{"183":3}}],["finalize",{"2":{"183":18}}],["finally",{"2":{"63":1,"183":1}}],["final",{"2":{"6":1,"55":2,"58":2,"63":1,"64":5,"107":1,"121":1,"181":1,"182":21}}],["finetuning",{"2":{"96":5,"183":2}}],["fine",{"0":{"96":1},"2":{"31":1,"96":1,"183":1}}],["finders",{"2":{"182":1}}],["finder",{"2":{"64":10,"182":23}}],["findings",{"2":{"114":1}}],["finding",{"2":{"41":1,"64":2,"182":2}}],["find",{"2":{"2":1,"8":1,"12":1,"13":1,"14":1,"24":1,"26":1,"29":1,"35":1,"41":2,"52":1,"55":1,"58":5,"61":2,"63":3,"64":7,"66":2,"67":3,"98":1,"181":3,"182":51,"183":21}}],["finds",{"2":{"2":1,"64":1,"181":1,"182":14}}],["filled",{"2":{"183":2}}],["fill",{"2":{"76":1,"108":1,"160":1,"182":1,"183":14}}],["fills",{"2":{"7":1}}],["filechunker",{"2":{"182":4,"183":1}}],["filenames",{"2":{"64":1,"182":1}}],["filename",{"2":{"9":2,"96":1,"183":2}}],["file",{"2":{"4":1,"9":3,"12":6,"28":1,"32":1,"37":1,"64":4,"78":1,"84":1,"96":2,"98":1,"152":1,"153":1,"154":1,"182":10,"183":23}}],["files`",{"2":{"64":1,"182":1}}],["files",{"2":{"2":2,"9":1,"61":1,"64":6,"69":1,"77":1,"182":14}}],["filtered",{"2":{"58":1,"64":2,"182":4}}],["filtering",{"2":{"8":1,"18":1,"63":2,"64":2,"182":6}}],["filter",{"2":{"2":2,"7":1,"64":7,"114":2,"182":13,"183":1}}],["filters",{"2":{"2":2,"8":1,"63":1}}],["fired",{"2":{"167":1,"175":1}}],["firefunction",{"2":{"31":2}}],["fireworks",{"0":{"31":1},"2":{"0":1,"24":1,"27":1,"31":4,"75":1,"108":1,"183":2}}],["fireworksopenaischema",{"2":{"0":1,"31":2,"183":2}}],["first",{"2":{"1":1,"2":1,"6":1,"7":7,"9":2,"11":2,"14":1,"16":1,"17":1,"24":1,"26":1,"28":1,"37":1,"39":1,"41":1,"55":2,"60":2,"63":1,"64":2,"67":3,"78":2,"79":1,"93":1,"94":3,"97":2,"106":2,"107":1,"112":1,"144":1,"162":1,"164":1,"166":1,"168":1,"169":1,"174":1,"181":3,"182":14,"183":27}}],["fur",{"2":{"183":1}}],["furthermore",{"2":{"108":1}}],["further",{"2":{"67":2,"93":2,"181":1,"183":2}}],["fusion",{"2":{"182":8,"183":2}}],["fulfills",{"2":{"142":1}}],["fulfilling",{"2":{"130":1}}],["fully",{"2":{"58":1,"64":1,"93":1,"142":3,"182":1}}],["full",{"2":{"7":1,"64":3,"93":1,"182":10,"183":9}}],["fuzzy",{"2":{"67":1,"183":1}}],["functor",{"2":{"55":1,"181":2}}],["functionality",{"2":{"51":1,"56":1,"142":1,"167":1,"175":1,"180":3,"181":1,"182":3,"183":3}}],["functionalities",{"2":{"0":1,"142":1,"183":1}}],["function",{"0":{"47":1},"2":{"6":1,"7":2,"8":1,"9":1,"11":4,"13":1,"17":1,"18":1,"21":1,"22":5,"23":2,"31":1,"45":1,"47":1,"49":2,"50":1,"52":2,"54":6,"55":37,"58":2,"60":2,"61":1,"63":1,"64":20,"67":12,"78":2,"92":1,"93":2,"94":1,"96":1,"97":1,"102":1,"106":4,"108":10,"112":1,"114":1,"130":3,"142":1,"145":3,"147":3,"167":3,"175":3,"181":58,"182":42,"183":108}}],["functions",{"0":{"11":1,"106":1},"2":{"0":2,"2":1,"7":1,"9":1,"11":5,"16":1,"22":2,"24":1,"27":1,"37":1,"52":4,"54":1,"55":7,"57":1,"58":2,"60":5,"61":1,"63":1,"64":1,"66":2,"78":1,"87":1,"90":1,"92":1,"94":2,"97":1,"105":1,"106":5,"166":1,"167":1,"168":1,"169":1,"174":1,"175
":1,"181":8,"182":12,"183":15}}],["func",{"2":{"55":2,"181":4,"182":1}}],["future",{"2":{"1":1,"4":2,"24":1,"58":1,"94":1,"108":1,"180":1,"181":1,"182":1,"183":2}}],["ml",{"2":{"171":1}}],["mm",{"2":{"152":2,"153":3}}],["mdoel",{"2":{"107":1}}],["mdash",{"2":{"50":1,"55":9,"64":6,"67":5,"179":2,"180":1,"181":37,"182":142,"183":184}}],["m1",{"2":{"37":1}}],["mcts",{"2":{"22":1,"52":1,"55":1,"181":1}}],["m",{"0":{"83":1},"2":{"22":2,"24":1,"26":1,"28":3,"30":1,"31":2,"34":1,"37":1,"42":1,"54":2,"55":3,"92":3,"108":1,"181":3,"183":5}}],["msg1",{"2":{"183":2}}],["msgs",{"2":{"183":1}}],["msg=aigenerate",{"2":{"183":7}}],["msg",{"2":{"13":2,"17":5,"20":4,"21":2,"23":4,"24":2,"26":1,"27":1,"29":1,"31":2,"35":1,"40":2,"41":1,"45":1,"46":1,"47":2,"55":12,"58":1,"64":2,"83":2,"93":4,"98":5,"107":3,"108":2,"181":9,"182":6,"183":152}}],["myfield",{"2":{"183":1}}],["myfunction",{"2":{"183":2}}],["mystruct",{"2":{"183":1}}],["myschema",{"2":{"91":2}}],["mytemplates",{"2":{"181":1}}],["mytype",{"2":{"93":1}}],["myaijudgemodel",{"2":{"182":1}}],["myadd",{"2":{"167":6,"175":6}}],["myabstractresponse",{"2":{"93":5}}],["mybool",{"2":{"93":2}}],["mymodel",{"2":{"91":1}}],["mymeasurementwrapper",{"2":{"183":1}}],["mymeasurement",{"2":{"20":5,"183":35}}],["myreranker",{"2":{"60":4,"64":2,"182":2}}],["my",{"0":{"97":1},"2":{"13":1,"14":1,"24":3,"26":2,"31":1,"34":1,"35":1,"41":1,"79":1,"89":1,"91":1,"92":3,"108":1,"183":14}}],["music",{"2":{"152":1,"153":1}}],["must",{"2":{"1":1,"13":2,"22":2,"37":1,"41":2,"54":2,"55":9,"64":1,"69":1,"94":3,"105":1,"108":3,"115":1,"117":1,"118":1,"121":1,"125":1,"130":3,"131":1,"136":2,"138":2,"152":1,"153":1,"154":2,"156":1,"160":1,"164":1,"167":2,"171":1,"175":2,"177":1,"180":1,"181":13,"182":8,"183":18}}],["murmured",{"2":{"67":1,"183":1}}],["mutates",{"2":{"64":1,"182":1}}],["mutated",{"2":{"64":1,"182":4}}],["mutating",{"2":{"55":1,"63":1,"64":1,"181":2,"182":2}}],["mutable",{"2":{"55":1,"183":3}}],["multihits",{"2":{"182":1}}],["multihop",{"2":{"182":1}}],["multicandidatechunks",{"2":{"182":2,"183":1}}],["multifinder",{"2":{"64":1,"182":4,"183":1}}],["multiindex",{"2":{"64":2,"182":14,"183":1}}],["multiplier",{"2":{"182":5}}],["multiplication",{"2":{"47":1}}],["multiple",{"0":{"46":1},"2":{"6":1,"8":1,"15":1,"22":1,"46":1,"55":3,"60":1,"64":2,"67":3,"72":1,"93":1,"98":1,"102":1,"105":1,"107":1,"108":1,"167":1,"168":2,"169":2,"175":1,"181":6,"182":10,"183":27}}],["multi",{"0":{"92":1},"2":{"35":1,"42":1,"64":3,"72":1,"181":2,"182":7,"183":8}}],["much",{"0":{"82":1},"2":{"7":2,"8":1,"9":1,"14":1,"41":1,"55":1,"64":2,"106":2,"108":1,"181":3,"182":2,"183":3}}],["mixed",{"2":{"108":1}}],["mix",{"2":{"106":1,"182":1,"183":3}}],["mixtral",{"2":{"28":1,"30":1,"31":2,"37":2,"108":2,"183":1}}],["million",{"2":{"72":2}}],["mickey",{"2":{"67":1,"183":1}}],["middleware",{"2":{"183":1}}],["middle",{"2":{"41":1,"112":1,"181":1,"182":1}}],["mimics",{"2":{"67":1,"183":1}}],["mimic",{"2":{"27":1,"93":1,"101":1,"181":2,"183":3}}],["mind",{"2":{"108":1}}],["minute",{"2":{"79":5}}],["minutes",{"2":{"12":2,"82":2,"183":3}}],["min",{"2":{"64":8,"67":1,"182":12,"183":1}}],["minichunks",{"2":{"67":1,"183":1}}],["minimize",{"2":{"182":1}}],["minimal",{"2":{"56":1}}],["minimum",{"2":{"2":1,"64":2,"67":2,"69":1,"83":1,"182":7,"183":2}}],["mini",{"2":{"55":2,"83":2,"181":2,"182":2}}],["mistakes",{"2":{"130":2}}],["mistrall",{"2":{"183":2}}],["mistralai",{"0":{"24":1,"26":1},"2":{"24":3,"25":1,"26":1,"27":1,"75":1,"84":2,"108":1,"183":5}}],["mistral",{"2":{"0":1,"23":3,"24":7,"26":7,"37":1,"40":1,"47":
1,"89":2,"183":22}}],["mistralopenaischema",{"2":{"0":1,"24":2,"26":2,"183":4}}],["missing",{"2":{"7":1,"55":1,"141":1,"142":2,"160":1,"183":6}}],["might",{"2":{"7":1,"11":1,"24":2,"26":1,"55":2,"67":1,"80":2,"81":1,"84":1,"106":1,"181":2,"183":4}}],["mapped",{"2":{"183":1}}],["mapping",{"2":{"93":1,"183":4}}],["map",{"2":{"182":1,"183":12}}],["mapreduce",{"2":{"46":1}}],["madrid",{"2":{"71":1,"72":3}}],["made",{"2":{"52":1,"55":5,"120":1,"153":1,"181":6,"183":2}}],["mascarading",{"2":{"183":1}}],["mask",{"2":{"66":1}}],["mastering",{"2":{"58":1}}],["master",{"2":{"13":2,"35":1,"41":1,"183":5}}],["magenta",{"2":{"58":1,"182":3}}],["maintaining",{"2":{"178":1}}],["maintain",{"2":{"153":1,"183":2}}],["mainly",{"2":{"126":1}}],["main",{"2":{"52":2,"57":1,"58":1,"60":3,"61":1,"64":1,"66":1,"94":1,"104":1,"108":1,"158":1,"180":1,"182":3,"183":7}}],["machine",{"2":{"58":1}}],["machines",{"2":{"35":1}}],["mac",{"2":{"37":1,"84":1}}],["macros",{"2":{"55":1,"183":5}}],["macro",{"2":{"16":1,"34":1,"71":1,"92":3,"130":1,"183":8}}],["markup",{"2":{"183":1}}],["marks",{"2":{"182":1,"183":1}}],["marked",{"2":{"120":1,"183":3}}],["markdown",{"2":{"21":3,"152":1,"153":1,"154":1,"160":1,"164":2,"183":20}}],["marsaglia",{"2":{"181":1}}],["mars",{"2":{"18":1,"183":2}}],["margin=",{"2":{"182":1}}],["margin",{"2":{"2":1,"182":4}}],["manner",{"2":{"152":1,"182":3}}],["management",{"2":{"183":1}}],["managed",{"2":{"181":1,"183":4}}],["manage",{"2":{"181":1}}],["manages",{"2":{"108":1,"181":1,"183":1}}],["manageable",{"2":{"63":1,"67":1,"181":1,"183":1}}],["managing",{"2":{"60":1}}],["manually",{"2":{"27":1,"28":1,"78":1}}],["manymeasurements",{"2":{"20":1,"183":2}}],["many",{"0":{"80":1},"2":{"20":1,"24":1,"27":1,"52":2,"66":1,"74":1,"75":1,"89":1,"93":1,"94":1,"102":1,"108":1,"124":1,"125":1,"183":8}}],["mandarin",{"2":{"15":1}}],["manipulations",{"2":{"159":1}}],["manipulation",{"2":{"2":1,"64":1,"65":1,"182":1}}],["matrices",{"2":{"182":5}}],["matrix",{"2":{"23":1,"46":2,"47":1,"63":1,"182":29,"183":4}}],["mat",{"2":{"182":2}}],["matlab",{"2":{"58":1}}],["matter",{"2":{"36":1}}],["materialized",{"2":{"182":1}}],["materialize",{"2":{"45":1,"183":1}}],["material",{"2":{"13":1}}],["matches",{"2":{"182":2,"183":5}}],["matched",{"2":{"124":1,"125":1,"128":1,"182":2}}],["match",{"2":{"7":2,"9":3,"12":1,"58":5,"63":1,"64":9,"67":7,"182":16,"183":9}}],["matching",{"2":{"7":5,"58":1,"64":3,"67":1,"182":9,"183":1}}],["maybeextract",{"2":{"20":1,"183":18}}],["may",{"2":{"9":1,"34":1,"42":1,"94":2,"140":1,"142":1,"152":1,"180":1,"181":1,"182":1,"183":13}}],["maximize",{"2":{"182":1}}],["maximum",{"2":{"19":1,"22":2,"50":1,"55":4,"67":4,"79":1,"114":1,"179":1,"181":6,"182":3,"183":16}}],["maxes",{"2":{"79":1}}],["max",{"2":{"8":1,"21":1,"22":4,"50":1,"54":2,"55":21,"64":3,"66":1,"67":21,"93":1,"95":1,"108":1,"179":1,"181":29,"182":14,"183":51}}],["makie",{"2":{"64":1,"182":1}}],["making",{"2":{"0":1,"100":1,"105":1}}],["makes",{"2":{"37":1,"106":1,"152":1}}],["make",{"2":{"4":1,"6":1,"7":1,"8":2,"12":1,"29":1,"37":2,"55":2,"64":3,"65":1,"69":2,"79":1,"84":2,"94":1,"108":2,"110":1,"117":1,"118":1,"138":1,"152":3,"153":1,"162":1,"164":1,"181":2,"182":7,"183":8}}],["mention",{"2":{"152":1}}],["mentioning",{"2":{"142":1,"171":1}}],["mentioned",{"2":{"121":1,"126":1,"142":1,"171":1}}],["merely",{"2":{"97":1,"183":1}}],["merged",{"2":{"182":2}}],["merges",{"2":{"182":3}}],["merge",{"2":{"64":2,"182":5,"183":1}}],["melody",{"2":{"67":1,"183":1}}],["memory`",{"2":{"108":1}}],["memory",{"2":{"58":1,"130":1,"159":1,"182":2}}],["memo
ries",{"2":{"13":1,"67":1,"183":1}}],["meetings",{"2":{"152":2,"153":2}}],["meeting",{"2":{"142":1}}],["meets",{"2":{"140":2,"142":1}}],["meet",{"2":{"39":1,"40":1,"42":1,"142":1}}],["mechanisms",{"2":{"106":1}}],["mechanism",{"2":{"24":1,"26":1,"67":1,"108":1,"183":1}}],["medium",{"2":{"24":1,"26":1,"182":4}}],["measuring",{"2":{"182":4}}],["measurement",{"2":{"183":1}}],["measurements",{"2":{"20":2,"183":16}}],["measures",{"2":{"64":1,"67":2,"182":1,"183":2}}],["meantime",{"2":{"183":1}}],["meant",{"2":{"130":1,"131":1,"183":2}}],["meaningful",{"2":{"152":1}}],["meaning",{"2":{"83":2,"127":1,"182":1}}],["means",{"2":{"19":1,"32":1,"37":1,"58":1,"64":2,"66":2,"79":1,"182":3,"183":2}}],["mean",{"2":{"1":1,"7":2,"182":2}}],["me",{"2":{"17":2,"23":3,"24":1,"26":1,"30":2,"31":2,"34":1,"39":1,"42":1,"45":2,"46":4,"47":2,"67":1,"87":1,"93":2,"94":4,"171":1,"183":12}}],["messaging",{"2":{"140":1,"183":3}}],["message=true",{"2":{"83":2}}],["message",{"0":{"34":1,"39":1},"2":{"22":1,"52":2,"54":1,"55":18,"58":1,"64":1,"71":2,"80":1,"83":1,"89":2,"92":2,"94":2,"98":3,"102":3,"107":2,"108":3,"130":1,"131":2,"134":1,"140":1,"149":1,"181":21,"182":4,"183":183}}],["message`",{"2":{"22":1,"54":1,"55":1,"181":1}}],["messagese",{"2":{"183":1}}],["messages",{"0":{"104":1},"2":{"9":1,"13":2,"24":1,"27":1,"36":1,"55":1,"83":1,"92":1,"93":1,"94":1,"96":1,"97":5,"98":1,"100":1,"103":1,"104":2,"107":6,"108":1,"181":4,"183":56}}],["mesages",{"2":{"9":1}}],["meticulously",{"2":{"144":1,"145":1,"147":1,"153":1}}],["meta",{"2":{"98":1,"183":12}}],["metaprogramming",{"2":{"58":1}}],["metadatamessage",{"2":{"183":2}}],["metadata=true",{"2":{"2":1,"8":1}}],["metadata",{"0":{"113":1},"1":{"114":1,"115":1},"2":{"2":4,"8":1,"63":1,"64":1,"98":5,"114":2,"115":1,"126":1,"182":14,"183":39}}],["met",{"2":{"55":12,"93":2,"108":1,"181":16}}],["methoderror",{"2":{"108":1}}],["methods",{"2":{"55":3,"63":1,"64":1,"100":1,"108":1,"181":3,"182":3,"183":6}}],["method",{"2":{"11":1,"52":2,"55":1,"60":2,"64":14,"67":1,"91":1,"100":1,"101":1,"106":1,"179":2,"181":39,"182":158,"183":126}}],["metrics",{"2":{"6":1}}],["move",{"2":{"182":1,"183":1}}],["moved",{"2":{"1":1,"180":1,"181":1,"182":1}}],["mock",{"2":{"181":1}}],["monitoring",{"2":{"183":2}}],["month",{"2":{"81":1}}],["monte",{"2":{"22":1,"52":1,"55":1,"181":3}}],["money",{"2":{"81":1,"183":1}}],["moonlight",{"2":{"67":2,"183":2}}],["mouse",{"2":{"67":1,"183":1}}],["modal",{"2":{"183":2}}],["modality",{"2":{"65":1}}],["modifies",{"2":{"183":2}}],["modified",{"2":{"182":1,"183":1}}],["modification",{"2":{"183":1}}],["modify",{"2":{"9":1}}],["modular",{"2":{"57":1,"181":1}}],["modules",{"2":{"66":1}}],["module",{"0":{"180":1},"2":{"1":1,"9":1,"11":1,"22":1,"48":2,"49":1,"51":2,"52":1,"55":4,"56":2,"58":1,"66":1,"106":1,"180":4,"181":3,"182":2,"183":4}}],["mode=true",{"2":{"183":1}}],["modes",{"2":{"182":1}}],["modern",{"2":{"120":2}}],["moderation",{"2":{"18":1}}],["mode",{"2":{"9":2,"64":1,"108":2,"182":1,"183":28}}],["model3",{"2":{"183":1}}],["model2",{"2":{"183":1}}],["model1",{"2":{"183":3}}],["modeling",{"2":{"58":1}}],["model>",{"2":{"27":1}}],["model=pt",{"2":{"64":1,"182":1}}],["model=",{"2":{"21":2,"22":1,"23":3,"24":3,"26":2,"27":1,"30":2,"31":3,"35":1,"37":1,"43":1,"54":1,"55":3,"62":1,"83":2,"98":3,"106":2,"107":1,"181":3,"182":1,"183":47}}],["modelspec",{"2":{"183":3}}],["models",{"0":{"23":1,"29":1,"37":1,"42":1,"83":1},"1":{"38":1,"39":1,"40":1,"41":1,"42":1,"43":1,"44":1,"45":1,"46":1,"47":1},"2":{"0":1,"11":1,"16":1,"18":1,"23":1,"24":4,"26":3,"28":2,"29":2,"32":1,
"36":1,"37":4,"39":1,"42":1,"48":1,"55":1,"64":2,"67":2,"72":1,"74":2,"75":1,"76":5,"80":1,"83":3,"89":3,"90":1,"91":1,"98":1,"100":1,"101":1,"103":1,"106":1,"108":1,"144":1,"145":1,"168":1,"169":1,"173":1,"174":1,"175":1,"181":2,"182":7,"183":38}}],["model",{"0":{"0":1,"16":1,"90":1,"96":1,"101":1},"2":{"0":3,"6":2,"7":2,"11":8,"16":8,"17":1,"18":1,"19":1,"22":3,"23":1,"24":2,"26":1,"27":2,"28":4,"29":5,"30":4,"31":6,"32":2,"33":1,"34":2,"35":1,"37":3,"39":2,"40":2,"41":1,"42":6,"45":2,"46":3,"47":1,"52":8,"54":3,"55":16,"56":1,"57":3,"58":2,"60":2,"62":4,"64":43,"71":3,"82":1,"83":2,"88":1,"89":1,"90":6,"91":3,"92":2,"93":1,"96":1,"97":2,"98":3,"100":3,"101":1,"102":1,"103":1,"104":3,"106":8,"107":4,"108":17,"117":2,"118":4,"144":1,"150":1,"167":1,"171":5,"175":1,"181":19,"182":123,"183":296}}],["moment",{"2":{"1":1,"24":1,"27":1,"32":1,"43":1,"183":3}}],["mostly",{"2":{"96":1}}],["most",{"2":{"1":1,"8":1,"24":1,"26":1,"55":2,"58":1,"60":1,"63":2,"64":3,"88":1,"112":1,"114":2,"115":2,"125":1,"126":1,"136":1,"138":1,"150":1,"154":1,"158":1,"171":1,"181":4,"182":10,"183":12}}],["moreover",{"2":{"9":1}}],["more",{"2":{"0":2,"2":1,"5":3,"6":4,"7":4,"8":1,"9":3,"11":1,"13":2,"14":3,"16":2,"17":1,"18":3,"20":2,"21":1,"22":2,"24":1,"26":1,"28":1,"29":1,"37":1,"43":1,"50":1,"55":4,"58":4,"63":1,"64":6,"65":1,"66":1,"67":1,"69":1,"72":2,"79":1,"80":1,"81":1,"85":1,"86":1,"88":1,"89":1,"90":1,"93":1,"95":1,"97":1,"105":1,"106":2,"107":1,"108":5,"117":1,"126":1,"127":1,"130":1,"179":1,"181":11,"182":34,"183":63}}],["❌",{"2":{"0":26}}],["✅",{"2":{"0":46}}],["w",{"2":{"182":3}}],["wp",{"2":{"21":1,"183":2}}],["www",{"2":{"21":1,"183":2}}],["wraps",{"2":{"66":1,"182":1,"183":8}}],["wrap",{"2":{"66":2,"67":2,"98":6,"182":1,"183":12}}],["wrapped",{"2":{"98":1,"181":1,"183":1}}],["wrapper",{"2":{"20":1,"52":1,"55":3,"64":1,"67":2,"71":1,"181":3,"182":2,"183":20}}],["wrapping",{"2":{"48":1,"66":1}}],["wrong",{"2":{"54":1,"55":1,"93":1,"181":1,"183":2}}],["written",{"2":{"21":1,"140":3,"142":1,"183":2}}],["writing",{"2":{"14":1,"31":1,"36":1,"76":1,"162":1,"164":1,"166":1,"167":3,"174":1,"175":3,"183":2}}],["writer",{"2":{"140":2,"162":2,"164":1}}],["write",{"2":{"4":1,"9":2,"12":1,"104":1,"105":1,"108":4,"125":1,"130":2,"131":1,"160":3,"162":2,"164":3,"166":1,"167":2,"174":1,"175":2,"181":2,"183":2}}],["walk",{"2":{"99":1}}],["walkthrough",{"0":{"107":1,"108":1},"2":{"97":1}}],["wave",{"2":{"67":1,"183":1}}],["wake",{"2":{"67":1,"183":1}}],["warning",{"2":{"182":1,"183":2}}],["warnings",{"2":{"55":1,"181":1}}],["wars",{"2":{"13":1,"35":1,"41":1,"183":5}}],["waiting",{"2":{"108":1}}],["wait",{"2":{"22":1,"54":1,"55":2,"181":2,"183":2}}],["ways",{"2":{"13":1,"41":1,"90":1,"97":1,"183":1}}],["way",{"2":{"9":1,"22":1,"29":1,"52":1,"55":1,"60":1,"82":1,"84":1,"91":1,"92":1,"105":1,"125":1,"167":1,"175":1,"181":1,"182":1,"183":3}}],["was",{"2":{"7":1,"8":1,"9":2,"11":1,"47":1,"55":2,"60":1,"64":1,"67":1,"78":1,"82":1,"93":1,"106":1,"108":1,"120":1,"122":1,"130":1,"171":2,"181":4,"182":5,"183":14}}],["wanted",{"2":{"55":1,"181":1,"183":2}}],["wants",{"2":{"16":1}}],["want",{"2":{"2":1,"3":1,"7":2,"9":1,"11":3,"12":1,"20":1,"22":1,"54":1,"55":2,"58":2,"60":1,"63":2,"64":3,"71":1,"79":2,"88":1,"93":2,"94":3,"100":1,"103":1,"105":1,"106":3,"107":1,"181":2,"182":7,"183":37}}],["won",{"2":{"27":1,"28":1,"90":1}}],["wonders",{"2":{"8":1}}],["worth",{"0":{"82":1},"2":{"82":1}}],["worst",{"2":{"67":1,"183":1}}],["worry",{"2":{"37":1}}],["words",{"2":{"58":1,"64":1,"66":5,"67":13,"95":2,"114":1,"115":1,"124":1,"125":1,"12
6":1,"158":2,"160":3,"164":1,"177":1,"182":2,"183":14}}],["word",{"2":{"21":1,"22":4,"54":4,"55":3,"64":2,"66":1,"82":1,"110":1,"112":2,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"130":3,"131":3,"132":1,"134":1,"136":2,"137":1,"138":2,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"149":1,"150":1,"152":1,"153":1,"154":1,"157":1,"158":2,"159":1,"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"171":1,"173":1,"174":1,"175":1,"177":1,"178":1,"181":3,"182":9,"183":2}}],["wordcount",{"2":{"9":1,"14":1,"94":1,"183":3}}],["world",{"2":{"9":4,"13":1,"14":1,"15":1,"55":3,"67":3,"104":1,"105":1,"107":2,"108":1,"110":1,"114":2,"117":1,"118":1,"120":1,"124":1,"125":1,"127":1,"136":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"150":1,"154":1,"157":1,"158":1,"159":1,"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"173":1,"174":1,"175":1,"178":1,"181":1,"183":21}}],["workaround",{"2":{"108":1}}],["workload",{"2":{"58":1}}],["workflow",{"0":{"12":1},"2":{"52":1,"55":1,"140":2,"141":2,"142":2,"183":1}}],["workflows",{"0":{"22":1},"2":{"0":1,"17":1,"22":1,"51":1,"90":1,"106":2,"180":1,"183":2}}],["working",{"0":{"32":1},"1":{"33":1,"34":1,"35":1,"36":1},"2":{"11":1,"23":1,"48":1,"62":1,"65":1,"81":1,"89":1,"90":1,"106":1,"130":1,"131":1,"132":1,"182":1,"183":2}}],["work",{"2":{"7":1,"9":1,"12":1,"32":1,"37":1,"55":1,"60":1,"69":1,"93":1,"97":1,"98":1,"103":1,"106":1,"140":1,"181":1,"182":2}}],["workspace",{"2":{"183":2}}],["works",{"0":{"99":1},"1":{"100":1,"101":1,"102":1,"103":1,"104":1,"105":1,"106":1,"107":1,"108":1},"2":{"0":1,"24":2,"26":1,"67":1,"84":1,"99":1,"107":1,"108":2,"182":1,"183":7}}],["would",{"0":{"8":1,"96":1},"2":{"3":1,"4":1,"7":2,"9":3,"14":1,"39":1,"58":1,"60":1,"62":2,"78":1,"79":2,"91":1,"92":1,"93":2,"94":1,"98":3,"108":1,"124":1,"125":1,"182":3,"183":3}}],["welcome",{"2":{"182":1}}],["well",{"2":{"2":1,"22":1,"29":1,"31":1,"54":1,"55":1,"67":1,"108":4,"121":1,"140":3,"142":1,"168":1,"169":1,"181":2,"182":5,"183":5}}],["weaker",{"2":{"183":1}}],["weaving",{"2":{"171":1}}],["weather",{"2":{"20":3,"183":39}}],["web",{"2":{"58":2,"117":1,"118":9,"163":1,"182":10}}],["websearch",{"2":{"49":1,"50":4,"179":5,"183":1}}],["website",{"2":{"12":1,"77":1}}],["were",{"2":{"21":1,"37":1,"62":1,"153":1,"182":1,"183":1}}],["weighs",{"2":{"20":1,"183":7}}],["weight",{"2":{"20":2,"183":12}}],["went",{"2":{"11":1,"106":1}}],["we",{"0":{"8":1,"95":1},"2":{"2":1,"3":3,"4":1,"5":3,"6":4,"7":8,"8":1,"9":3,"11":6,"12":2,"13":1,"14":1,"16":1,"18":1,"19":2,"20":1,"21":1,"22":13,"23":2,"24":5,"26":3,"27":1,"30":1,"31":1,"32":2,"36":2,"37":1,"42":1,"45":1,"54":13,"55":24,"56":1,"58":5,"60":1,"62":2,"63":2,"64":4,"67":1,"70":1,"76":2,"79":5,"92":2,"93":14,"94":2,"95":1,"99":1,"102":2,"103":1,"105":4,"106":4,"107":9,"108":23,"181":22,"182":24,"183":61}}],["wise",{"2":{"182":1}}],["wisp",{"2":{"67":1,"183":1}}],["wiki",{"2":{"181":2}}],["wikipedia",{"2":{"181":2,"182":3}}],["width",{"2":{"66":1,"67":3,"182":5,"183":12}}],["wide",{"2":{"30":1,"31":1,"55":1,"177":1,"181":1}}],["wins",{"2":{"181":9}}],["winning",{"2":{"55":2,"181":2}}],["winks",{"2":{"41":2}}],["win",{"2":{"12":1}}],["windows",{"2":{"58":1,"67":2,"84":1,"183":2}}],["window",{"2":{"2":1,"67":2,"69":1,"84":1,"181":1,"182":12,"183":3}}],["will",{"2":{"1":2,"2":2,"4":1,"9":1,"11":1,"14":2,"19":2,"20":1,"22":2,"28":1,"35":1,"41":2,"52":1,"54":1,"55":24,"58":1,"60":1,"62":1,"64":21,"67":5,"69":2,"70":1,"71":1,"74":1,"7
9":1,"81":1,"82":2,"84":1,"92":2,"94":4,"97":3,"98":1,"102":4,"106":1,"108":1,"112":1,"115":1,"130":1,"136":1,"138":1,"140":1,"142":1,"166":1,"167":1,"168":2,"169":2,"174":1,"175":1,"177":1,"181":36,"182":40,"183":72}}],["without",{"2":{"41":1,"55":1,"60":1,"67":1,"78":1,"97":1,"105":1,"118":1,"120":1,"149":1,"153":1,"171":1,"182":3,"183":5}}],["within",{"2":{"11":1,"64":1,"67":2,"106":1,"120":1,"181":2,"182":7,"183":14}}],["with",{"0":{"1":1,"22":1,"32":1,"33":1,"37":1,"38":1,"43":1,"44":1,"71":1,"72":1},"1":{"2":1,"33":1,"34":2,"35":2,"36":2,"38":1,"39":2,"40":2,"41":2,"42":2,"43":1,"44":1,"45":2,"46":2,"47":2},"2":{"0":4,"1":3,"2":1,"6":2,"7":4,"8":1,"9":11,"11":12,"12":2,"13":2,"14":4,"15":1,"16":1,"19":1,"20":5,"21":2,"22":5,"23":2,"24":5,"26":2,"27":4,"28":2,"29":1,"30":3,"31":4,"32":1,"34":2,"35":1,"36":1,"37":2,"39":2,"40":1,"41":1,"42":4,"45":1,"46":1,"48":1,"52":5,"54":6,"55":27,"56":3,"57":1,"58":10,"60":3,"61":4,"62":1,"63":2,"64":24,"65":2,"67":8,"69":3,"71":2,"74":1,"76":1,"77":2,"78":1,"79":2,"82":4,"83":1,"84":1,"89":3,"90":2,"91":1,"92":4,"93":5,"94":2,"96":3,"97":2,"98":7,"99":1,"100":2,"101":1,"104":3,"105":7,"106":13,"107":2,"108":9,"110":1,"112":2,"114":1,"117":2,"118":2,"121":2,"122":1,"124":1,"125":1,"126":3,"127":2,"128":1,"130":3,"131":2,"132":1,"134":1,"136":3,"138":2,"140":6,"141":4,"142":3,"144":2,"145":1,"147":1,"150":1,"152":2,"153":3,"154":2,"160":5,"161":2,"163":1,"165":1,"167":2,"168":1,"169":1,"170":1,"171":4,"173":1,"175":2,"181":50,"182":69,"183":167}}],["whose",{"2":{"183":2}}],["who",{"2":{"27":1,"50":2,"154":1,"179":2}}],["whole",{"0":{"7":1},"2":{"11":1,"37":1,"92":2,"103":1,"106":1,"167":1,"175":1,"181":1,"183":5}}],["while",{"2":{"130":1,"167":1,"175":1,"181":1,"183":1}}],["whispered",{"2":{"67":4,"183":4}}],["white",{"2":{"22":1,"54":1,"55":1,"67":1,"181":1,"183":6}}],["whichever",{"2":{"46":1}}],["which",{"2":{"0":1,"7":3,"9":1,"11":3,"18":1,"22":1,"28":1,"34":1,"37":2,"42":1,"52":4,"55":4,"57":2,"58":2,"60":1,"63":1,"64":5,"67":2,"79":1,"88":1,"94":1,"95":1,"96":1,"97":1,"100":1,"101":1,"106":3,"107":1,"108":2,"120":2,"140":1,"152":1,"167":1,"175":1,"181":5,"182":23,"183":50}}],["why",{"0":{"74":1},"1":{"75":1},"2":{"14":1,"20":1,"55":2,"108":1,"130":1,"131":1,"181":1,"183":3}}],["whatever",{"2":{"55":1,"88":1,"181":1,"183":1}}],["what",{"0":{"8":1,"75":1,"97":1},"2":{"2":3,"5":1,"6":1,"7":5,"9":1,"12":1,"13":2,"14":3,"16":1,"20":1,"22":1,"35":1,"40":1,"41":1,"54":1,"55":2,"58":4,"64":4,"67":1,"71":4,"72":1,"83":2,"92":3,"93":1,"97":3,"100":1,"107":3,"120":1,"130":4,"141":1,"150":1,"162":4,"181":2,"182":13,"183":30}}],["whether",{"2":{"7":4,"11":3,"18":1,"50":3,"55":2,"64":4,"106":3,"108":1,"137":2,"179":3,"181":8,"182":12,"183":33}}],["whenever",{"2":{"182":3}}],["when",{"2":{"0":1,"9":4,"11":4,"12":1,"18":1,"22":2,"23":1,"28":1,"52":5,"54":1,"55":2,"58":1,"60":3,"63":1,"64":1,"78":1,"89":1,"90":1,"93":1,"94":1,"100":1,"104":2,"105":2,"106":4,"107":2,"108":3,"150":1,"152":2,"153":1,"157":1,"161":1,"163":1,"165":1,"166":3,"170":1,"173":1,"174":1,"177":1,"181":8,"182":5,"183":31}}],["whereas",{"2":{"7":1,"108":1}}],["where",{"2":{"0":1,"7":1,"11":1,"13":1,"42":1,"52":1,"55":4,"58":1,"64":3,"66":1,"67":1,"69":1,"78":1,"79":1,"84":1,"93":1,"97":1,"105":1,"106":1,"108":1,"124":1,"125":1,"153":2,"160":1,"181":7,"182":22,"183":26}}],["b64",{"2":{"183":4}}],["b",{"2":{"167":2,"175":2,"181":1,"182":3}}],["b>",{"2":{"67":2,"182":2,"183":12}}],["broader",{"2":{"183":2}}],["browser",{"2":{"89":1}}],["br",{"2":{"182":1}}],["br>",{"2":{"67":1,"182":1,"183":6}
}],["brand",{"2":{"108":4}}],["branching",{"2":{"55":1,"181":1}}],["branch",{"2":{"55":1,"181":1}}],["branches",{"2":{"55":4,"67":1,"181":4,"183":1}}],["brackets",{"2":{"58":1,"64":2,"153":1,"182":3}}],["breath",{"2":{"131":1,"174":1}}],["breaks",{"2":{"67":1,"182":1,"183":1}}],["break",{"2":{"58":1,"131":1,"132":1}}],["bread",{"2":{"31":2,"108":2}}],["bright",{"2":{"183":2}}],["bring",{"2":{"70":1}}],["brings",{"2":{"13":1}}],["briefly",{"2":{"153":1}}],["brief",{"2":{"9":3,"66":1,"104":1,"105":1,"107":2,"110":1,"117":1,"118":1,"124":1,"130":1,"152":3,"157":1,"158":1,"159":1,"160":7,"161":1,"163":1,"164":1,"165":1,"166":1,"167":1,"170":1,"171":1,"173":1,"174":1,"175":1}}],["bge",{"2":{"30":1}}],["binx",{"2":{"182":4}}],["bin",{"2":{"182":4}}],["binint",{"2":{"182":4}}],["binary",{"2":{"182":18}}],["binarycosinesimilarity",{"2":{"182":5,"183":1}}],["binarybatchembedder",{"2":{"182":5,"183":1}}],["biology",{"2":{"158":1}}],["billing",{"2":{"69":1,"80":2,"81":1}}],["bigger",{"2":{"108":1}}],["big",{"2":{"58":1,"62":1,"64":1,"67":1,"182":1,"183":1}}],["bitmatrix",{"2":{"182":3}}],["bits",{"2":{"182":13,"183":1}}],["bitpackedcosinesimilarity",{"2":{"182":5,"183":1}}],["bitpackedbatchembedder",{"2":{"182":5,"183":1}}],["bit",{"2":{"28":1,"39":1,"63":1,"108":1,"158":1,"182":4}}],["biases",{"2":{"183":1}}],["bias",{"2":{"11":1,"19":1,"106":1,"183":12}}],["blank",{"2":{"149":1}}],["blanksystemuser",{"0":{"149":1},"2":{"97":2,"98":1,"181":1,"183":3}}],["black",{"2":{"22":1,"54":1,"55":1,"67":1,"108":1,"181":1,"182":1,"183":6}}],["blogtitleimagegenerator",{"0":{"177":1}}],["blog",{"2":{"140":1,"164":4,"177":4}}],["blob",{"2":{"67":1,"182":1,"183":14}}],["block",{"2":{"55":15,"64":3,"130":1,"160":1,"181":3,"182":7,"183":27}}],["blocks",{"2":{"55":6,"64":2,"167":2,"175":2,"181":3,"182":2,"183":25}}],["blocking",{"2":{"12":1,"72":1}}],["blue",{"2":{"22":1,"54":1,"55":3,"58":1,"181":3,"182":3,"183":2}}],["bold",{"2":{"182":4}}],["body",{"2":{"160":2,"183":15}}],["bodies",{"2":{"67":1,"183":1}}],["border",{"2":{"67":4,"182":4,"183":24}}],["boundary",{"2":{"182":1}}],["boundaries",{"2":{"182":3,"183":1}}],["bound",{"2":{"55":7,"181":9}}],["bounds",{"2":{"55":1,"120":1,"181":1}}],["bool=isnothing",{"2":{"183":1}}],["bool=true",{"2":{"55":4,"64":1,"181":1,"182":1,"183":5}}],["bool=false",{"2":{"55":12,"181":6,"182":1,"183":36}}],["boolean",{"2":{"55":2,"93":1,"181":4,"182":13,"183":20}}],["bool",{"2":{"11":2,"22":1,"50":3,"54":1,"55":14,"64":15,"93":3,"106":2,"108":2,"179":3,"181":16,"182":63,"183":103}}],["both",{"2":{"7":4,"22":2,"41":1,"54":2,"55":2,"64":1,"131":1,"181":2,"182":9,"183":7}}],["bang",{"2":{"71":1}}],["bandit",{"2":{"55":1,"181":2}}],["barplot",{"2":{"64":1,"182":1}}],["bad",{"2":{"55":1,"67":1,"181":1,"183":1}}],["bakllava",{"2":{"42":1,"43":1,"183":3}}],["balance",{"2":{"35":1,"41":2,"80":2}}],["baai",{"2":{"30":1}}],["batched",{"2":{"182":3}}],["batchembedder",{"2":{"64":1,"182":11,"183":1}}],["batch",{"2":{"15":1,"64":3,"182":21}}],["backpropagate",{"2":{"181":5,"183":1}}],["backticks",{"2":{"130":1,"131":1,"154":1,"183":1}}],["back",{"2":{"55":4,"92":1,"93":1,"107":1,"181":5,"183":5}}],["background",{"2":{"12":1,"23":1,"89":1}}],["backspace",{"2":{"9":1}}],["bash",{"2":{"28":1,"170":1}}],["basename",{"2":{"94":1,"183":1}}],["base",{"2":{"24":2,"26":1,"27":1,"55":1,"56":1,"153":1,"182":1,"183":11}}],["base64decode",{"2":{"183":1}}],["base64",{"2":{"11":1,"104":1,"106":1,"183":2}}],["based",{"2":{"7":2,"55":3,"58":2,"60":1,"63":6,"64":8,"110":2,"112":4,"117":1,"118":1,"120":1,"121":1,"122
":1,"126":1,"130":2,"131":2,"132":1,"137":1,"138":1,"141":1,"158":2,"168":1,"169":1,"177":1,"181":10,"182":27,"183":21}}],["basic",{"0":{"109":1},"1":{"110":1},"2":{"11":1,"50":3,"100":1,"104":1,"106":1,"179":3}}],["bm25similarity",{"2":{"64":1,"182":6,"183":1}}],["bm25",{"2":{"8":1,"64":2,"182":15}}],["buffer",{"2":{"183":1}}],["business",{"2":{"160":1}}],["bullets",{"2":{"182":1}}],["bullet",{"2":{"130":5,"152":7,"153":5,"160":3}}],["bundle",{"2":{"96":1}}],["bug",{"2":{"78":2}}],["buy",{"2":{"69":1,"82":1}}],["but",{"2":{"5":1,"6":2,"7":2,"9":2,"11":3,"12":2,"13":2,"14":1,"19":1,"20":2,"24":1,"28":2,"29":1,"30":1,"31":1,"41":2,"50":1,"58":1,"64":2,"67":3,"72":1,"78":1,"79":1,"84":1,"92":1,"97":1,"99":1,"104":1,"106":3,"108":2,"142":1,"153":1,"173":1,"179":1,"181":1,"182":11,"183":27}}],["built",{"2":{"4":1,"57":1,"60":1,"114":1,"181":1,"182":1,"183":2}}],["builds",{"2":{"64":1,"182":4,"183":7}}],["build",{"2":{"1":1,"2":6,"4":1,"8":2,"11":2,"13":1,"52":1,"56":1,"57":4,"58":4,"60":3,"61":3,"63":4,"64":12,"108":1,"182":40,"183":19}}],["building",{"0":{"1":1},"1":{"2":1},"2":{"22":1,"51":1,"55":1,"56":1,"57":1,"58":1,"64":2,"163":1,"180":1,"181":2,"182":6}}],["bearer",{"2":{"183":3}}],["berlin",{"2":{"182":1}}],["belong",{"2":{"153":1,"154":1,"183":1}}],["below",{"2":{"0":1,"23":1,"55":2,"74":1,"78":1,"79":1,"88":1,"108":1,"115":1,"117":1,"125":1,"160":1,"166":1,"174":1,"181":2,"182":1,"183":8}}],["believe",{"2":{"130":1,"131":1,"132":1}}],["beneath",{"2":{"67":2,"183":2}}],["benefits",{"2":{"108":1}}],["benefit",{"2":{"55":1,"181":1}}],["behave",{"2":{"104":1}}],["behavior",{"2":{"52":1,"60":2,"67":2,"83":2,"91":1,"98":1,"108":1,"183":2}}],["behavioural",{"2":{"154":1,"158":1}}],["behaviours",{"2":{"55":1,"181":1}}],["behaviour",{"2":{"22":1,"54":1,"55":1,"108":1,"181":1}}],["behind",{"2":{"67":1,"171":1,"183":1}}],["begin",{"2":{"130":1,"140":1,"167":3,"175":3,"183":1}}],["beginning",{"2":{"92":1,"120":1,"183":3}}],["beginners",{"2":{"58":1}}],["begins",{"2":{"63":1,"171":1}}],["besides",{"2":{"183":2}}],["bespoke",{"2":{"55":1,"183":2}}],["best",{"2":{"5":1,"22":1,"24":2,"26":2,"30":1,"31":1,"34":1,"52":1,"54":1,"55":3,"58":7,"64":1,"94":2,"136":2,"138":2,"142":1,"154":1,"155":1,"156":1,"160":1,"181":12,"182":7,"183":5}}],["before",{"2":{"55":4,"66":1,"67":1,"69":2,"79":1,"84":1,"92":1,"130":1,"144":2,"152":1,"153":1,"181":1,"182":1,"183":11}}],["been",{"2":{"24":1,"26":1,"42":1,"55":3,"97":1,"105":1,"131":1,"141":1,"142":1,"181":4,"183":5}}],["becoming",{"2":{"13":1}}],["become",{"2":{"9":1,"13":3,"35":1,"41":2,"183":7}}],["because",{"2":{"7":1,"12":1,"22":1,"24":1,"26":1,"28":2,"36":1,"54":1,"55":1,"64":1,"106":1,"108":3,"181":3,"182":2,"183":5}}],["beta",{"2":{"181":3,"183":5}}],["betwee",{"2":{"55":1,"181":1}}],["between",{"2":{"6":1,"7":2,"11":1,"17":2,"22":1,"54":1,"55":5,"58":2,"66":4,"67":5,"100":1,"104":1,"121":1,"140":2,"141":2,"158":1,"181":6,"182":11,"183":15}}],["better",{"2":{"8":1,"50":1,"52":1,"64":1,"67":2,"84":1,"108":3,"117":3,"118":3,"140":1,"142":2,"179":1,"182":5,"183":4}}],["be",{"0":{"78":2},"2":{"1":1,"2":1,"7":2,"9":1,"11":5,"13":1,"16":4,"17":1,"18":1,"19":1,"22":3,"23":1,"24":1,"26":1,"28":1,"35":1,"41":1,"50":1,"52":2,"54":3,"55":23,"56":1,"57":1,"58":3,"60":2,"62":3,"63":1,"64":39,"66":1,"67":7,"70":1,"71":2,"74":1,"79":1,"80":1,"81":1,"83":1,"87":1,"89":1,"90":1,"93":2,"94":3,"95":1,"97":2,"98":2,"99":1,"100":1,"101":1,"102":1,"103":1,"106":4,"108":2,"110":1,"112":3,"115":1,"117":1,"118":1,"120":1,"121":1,"124":1,"125":1,"126":1,"130":6,"131":4,"136":1,"138
":2,"140":3,"141":1,"142":1,"150":1,"152":5,"153":4,"154":8,"158":1,"160":2,"164":1,"167":4,"168":2,"169":2,"171":3,"175":4,"177":3,"179":1,"180":2,"181":33,"182":90,"183":211}}],["being",{"2":{"0":1,"5":1,"7":1,"41":1,"55":1,"152":1,"181":1,"183":7}}],["by",{"2":{"0":3,"6":2,"9":6,"11":2,"14":1,"22":2,"24":1,"27":1,"52":3,"55":5,"56":1,"57":2,"58":4,"60":2,"62":1,"64":6,"66":1,"67":5,"76":1,"77":1,"78":1,"95":2,"97":2,"98":1,"106":1,"107":2,"108":2,"112":2,"114":1,"120":1,"121":1,"124":1,"125":1,"128":1,"130":4,"131":3,"132":1,"140":5,"142":2,"144":1,"152":1,"153":1,"155":1,"156":2,"162":1,"164":1,"166":1,"171":1,"174":1,"181":12,"182":41,"183":62}}],["eyes",{"2":{"183":1}}],["europe",{"2":{"182":4}}],["educator",{"2":{"164":1}}],["educational",{"2":{"107":1,"164":1}}],["editor",{"2":{"140":4}}],["editing",{"2":{"12":1}}],["et",{"2":{"182":3}}],["ethos",{"2":{"88":1}}],["ethereal",{"2":{"67":1,"183":1}}],["etc",{"2":{"0":1,"2":1,"9":1,"16":1,"22":1,"54":1,"55":2,"64":1,"74":1,"91":1,"97":1,"104":1,"106":1,"114":2,"117":1,"159":1,"178":1,"181":2,"182":13,"183":12}}],["echoes",{"2":{"183":5}}],["echoing",{"2":{"67":1,"183":1}}],["ecosystem",{"2":{"124":1}}],["econometrics",{"2":{"58":1}}],["e2e",{"2":{"61":1}}],["equally",{"2":{"183":1}}],["equality",{"2":{"93":1}}],["equal",{"2":{"50":1,"55":1,"64":1,"67":2,"179":1,"181":1,"182":3,"183":2}}],["equivalent",{"2":{"2":2,"14":1,"58":1,"106":1,"183":1}}],["essence",{"2":{"153":1}}],["essential",{"2":{"121":1,"181":2}}],["estimated",{"2":{"72":1}}],["estimate",{"2":{"36":1,"183":3}}],["especially",{"2":{"23":1,"55":1,"66":1,"89":1,"90":1,"107":1,"154":1,"181":2}}],["elapsed",{"2":{"182":1,"183":22}}],["elaboration",{"2":{"120":1}}],["elicit",{"2":{"150":1}}],["else`",{"2":{"168":1,"169":1}}],["elseif",{"2":{"168":1,"169":1}}],["else",{"2":{"19":2,"41":1,"55":2,"93":2,"117":2,"118":2,"158":1,"181":2,"183":5}}],["elementwise",{"2":{"182":1}}],["element",{"2":{"9":3,"14":2,"17":1,"20":1,"23":1,"47":1,"92":1,"94":2,"97":3,"107":2,"182":1,"183":15}}],["evolving",{"2":{"181":1}}],["evolved",{"2":{"153":1}}],["ever",{"2":{"183":2}}],["everyone",{"2":{"164":1}}],["every",{"2":{"60":1,"79":1,"82":1,"84":1,"90":1,"103":1,"178":1,"181":1,"182":1}}],["everything",{"2":{"19":2,"93":1,"98":1,"183":7}}],["eventmessage",{"2":{"183":1}}],["event",{"2":{"183":2}}],["even",{"2":{"20":2,"29":1,"30":1,"50":1,"54":1,"55":2,"62":1,"67":1,"152":1,"179":1,"181":1,"182":1,"183":5}}],["eval=false",{"2":{"55":2,"183":2}}],["evalutes",{"2":{"181":1}}],["evaluted",{"2":{"55":1,"181":1}}],["evaluator",{"2":{"134":2,"181":3}}],["evaluating",{"2":{"18":1,"55":2,"120":1,"137":1,"140":1,"182":1,"183":2}}],["evaluation",{"0":{"119":1},"1":{"120":1,"121":1,"122":1},"2":{"4":1,"6":2,"8":1,"11":1,"52":2,"55":10,"57":1,"106":1,"121":1,"122":1,"181":10,"182":9,"183":10}}],["evaluations",{"0":{"3":1},"1":{"4":1,"5":1,"6":1,"7":1},"2":{"55":3,"64":2,"181":5,"182":4}}],["evaluated",{"2":{"55":8,"130":1,"131":1,"181":3,"183":9}}],["evaluates",{"2":{"22":1,"54":1,"55":3,"181":2,"182":2,"183":3}}],["evaluate",{"0":{"6":1,"7":1},"2":{"3":1,"6":2,"7":2,"22":1,"54":1,"55":11,"121":2,"141":1,"142":1,"181":22,"183":4}}],["eval",{"2":{"7":2,"9":1,"55":9,"182":1,"183":13}}],["evals",{"2":{"4":6,"5":1,"6":3,"7":7,"57":1,"64":4,"182":18,"183":3}}],["effectiveness",{"2":{"140":1,"142":1}}],["effective",{"2":{"125":1,"127":1,"150":1}}],["effectively",{"2":{"13":1,"62":2,"66":1,"91":1,"103":1,"107":2,"181":1,"183":7}}],["efficiently",{"2":{"58":1,"142":1,"181":1}}],["efficient",{"2":{"55":2,"58":1,"65":1
,"160":2,"181":2,"182":4}}],["effort",{"2":{"14":1,"56":1}}],["emails",{"2":{"160":3}}],["email",{"2":{"160":10}}],["emphasize",{"2":{"140":1,"168":1,"169":1}}],["empty",{"0":{"78":2},"2":{"50":2,"55":2,"64":2,"67":6,"179":2,"181":1,"182":4,"183":40}}],["emotions",{"2":{"67":2,"183":2}}],["emotional",{"2":{"13":1}}],["emb",{"2":{"58":1,"182":21}}],["embedder",{"2":{"64":17,"182":29}}],["embedded",{"2":{"46":1,"64":1,"182":6}}],["embedding",{"0":{"45":1,"46":1},"2":{"2":1,"8":2,"17":1,"30":1,"31":1,"46":2,"63":1,"64":8,"182":54,"183":9}}],["embeddings",{"0":{"17":1,"44":1},"1":{"45":1,"46":1,"47":1},"2":{"2":1,"11":2,"17":3,"47":1,"61":2,"63":4,"64":8,"104":1,"106":2,"182":62,"183":20}}],["embeds",{"2":{"2":1,"64":1,"182":4}}],["embed",{"2":{"2":3,"17":3,"23":3,"24":1,"26":1,"30":1,"31":2,"45":2,"46":4,"47":1,"183":4}}],["either",{"2":{"11":1,"20":1,"24":1,"26":1,"42":1,"50":1,"60":1,"79":1,"93":1,"106":1,"179":1,"181":1,"183":7}}],["e",{"2":{"11":1,"20":1,"55":4,"106":1,"108":2,"112":1,"136":1,"138":1,"141":1,"181":2,"182":1,"183":13}}],["error=true",{"2":{"183":3}}],["errorexception",{"2":{"55":1,"181":1}}],["errors",{"2":{"20":1,"22":1,"23":1,"52":1,"54":1,"55":7,"64":1,"80":1,"108":1,"130":2,"132":1,"142":3,"181":10,"182":5,"183":3}}],["error",{"0":{"78":2,"79":1,"80":1},"2":{"7":2,"52":2,"55":13,"78":2,"79":3,"80":4,"93":1,"108":4,"130":2,"131":1,"181":14,"183":33}}],["earlier",{"2":{"78":1}}],["eating",{"2":{"31":1,"108":1}}],["easiest",{"2":{"71":1,"88":1,"91":1}}],["easier",{"2":{"0":1,"7":1,"29":1,"64":1,"65":1,"67":1,"91":1,"106":1,"152":1,"182":2,"183":1}}],["easily",{"2":{"16":1,"57":1,"64":1,"71":1,"93":1,"96":1,"182":2,"183":2}}],["easy",{"2":{"6":1,"47":1,"69":1,"84":1,"87":1,"89":1,"97":1,"108":1,"149":1,"153":1,"164":1,"183":5}}],["each",{"2":{"2":2,"4":1,"7":4,"11":2,"13":1,"16":1,"22":2,"54":2,"55":3,"58":1,"60":2,"62":1,"63":2,"64":21,"67":12,"82":1,"106":1,"112":1,"114":1,"115":1,"121":1,"126":1,"130":1,"141":1,"142":1,"152":3,"153":6,"154":4,"160":1,"164":1,"167":2,"171":2,"175":2,"181":6,"182":59,"183":31}}],["exclamation",{"2":{"182":1}}],["exclude",{"2":{"50":2,"153":1,"179":2,"182":3,"183":3}}],["exciting",{"2":{"24":1,"27":1}}],["exceed",{"2":{"79":1,"183":2}}],["exceeds",{"2":{"67":1,"183":1}}],["exceeding",{"2":{"64":1,"67":1,"182":7,"183":1}}],["exceeded",{"0":{"79":1},"2":{"22":1,"54":1,"79":2,"80":1}}],["excessive",{"2":{"58":1}}],["exception",{"2":{"55":4,"60":1,"108":1,"181":2,"183":3}}],["except",{"2":{"22":1,"181":1,"183":1}}],["external",{"2":{"120":1,"183":1}}],["extension",{"2":{"182":1,"183":2}}],["extensions",{"2":{"52":1,"56":1}}],["extensively",{"2":{"162":1,"164":1}}],["extensive",{"2":{"161":1,"168":1}}],["extensible",{"2":{"55":1,"57":1,"60":1,"181":1}}],["extended",{"2":{"55":1,"181":1,"183":1}}],["extend",{"2":{"16":1,"24":1,"56":1}}],["extremely",{"2":{"80":1,"121":2,"131":1,"140":1,"162":1,"164":1}}],["extras",{"2":{"64":3,"182":8,"183":2}}],["extra",{"2":{"28":1,"64":1,"67":2,"71":1,"93":1,"97":1,"153":1,"182":1,"183":5}}],["extractdata",{"0":{"147":1}}],["extractdataxml",{"0":{"145":1}}],["extractdatacotxml",{"0":{"144":1},"2":{"183":1}}],["extracted",{"2":{"114":3,"183":21}}],["extractor",{"2":{"108":1}}],["extraction",{"0":{"20":1,"146":1},"1":{"147":1},"2":{"31":1,"64":4,"108":1,"114":2,"144":2,"145":2,"147":2,"182":6,"183":17}}],["extracting",{"2":{"2":1,"20":3,"64":1,"182":1,"183":2}}],["extracts",{"2":{"2":1,"52":1,"55":1,"63":2,"64":2,"114":1,"115":1,"181":1,"182":7,"183":4}}],["extract",{"2":{"2":2,"8":1,"11":2,"20":3,"31":2,"55":5,
"98":2,"106":2,"107":1,"108":10,"114":4,"144":1,"145":1,"147":1,"181":5,"182":9,"183":74}}],["executing",{"2":{"182":2}}],["execution",{"0":{"15":1},"2":{"11":1,"52":2,"55":11,"106":1,"131":1,"132":1,"181":6,"183":14}}],["executor",{"2":{"55":1,"183":1}}],["execute",{"2":{"55":3,"106":1,"181":1,"183":11}}],["executed",{"2":{"52":1,"55":4,"131":1,"181":2,"183":5}}],["executes",{"2":{"11":1,"52":2,"55":2,"106":1,"181":4,"183":3}}],["examine",{"2":{"142":1}}],["example>",{"2":{"175":2}}],["examples",{"0":{"10":1,"53":1,"58":1},"1":{"11":1,"12":1,"13":1,"14":1,"15":1,"16":1,"17":1,"18":1,"19":1,"20":1,"21":1,"22":1,"23":1,"24":1,"54":1},"2":{"2":5,"4":2,"5":1,"6":1,"9":1,"14":2,"22":1,"32":1,"37":1,"55":2,"58":2,"64":9,"67":3,"72":2,"75":1,"91":1,"96":1,"97":1,"114":1,"154":2,"161":1,"163":1,"166":1,"170":1,"174":1,"181":1,"182":20,"183":24}}],["example",{"0":{"107":1,"108":1},"2":{"0":1,"1":1,"2":1,"5":1,"7":1,"9":3,"11":1,"14":1,"16":1,"18":1,"19":1,"22":1,"31":1,"37":1,"42":1,"50":1,"52":1,"55":6,"58":1,"62":1,"64":5,"67":3,"82":2,"83":3,"91":1,"93":3,"95":1,"96":1,"97":2,"98":1,"99":1,"102":1,"105":1,"106":2,"108":4,"114":1,"120":1,"154":4,"158":1,"167":1,"179":1,"181":10,"182":21,"183":52}}],["exact",{"2":{"64":2,"182":10,"183":7}}],["exactly",{"2":{"9":1,"11":2,"22":1,"52":1,"55":1,"64":1,"106":2,"181":5,"182":2,"183":1}}],["exit",{"2":{"9":1}}],["existent",{"2":{"22":1,"54":1,"55":1,"181":1}}],["existing",{"2":{"9":1,"60":1,"64":2,"98":1,"117":1,"118":1,"182":3,"183":5}}],["exists",{"2":{"7":2,"182":1}}],["exist",{"2":{"6":1,"7":6,"97":4}}],["expr",{"2":{"183":4}}],["expression",{"2":{"55":6,"140":1,"183":8}}],["export",{"2":{"69":1,"84":1}}],["exported",{"2":{"9":1,"37":1,"55":1,"181":1}}],["expanded",{"2":{"183":2}}],["expanding",{"2":{"126":1}}],["expand",{"2":{"97":1,"107":1,"126":1,"181":8,"183":1}}],["expands",{"2":{"18":1,"97":1,"181":2}}],["expanse",{"2":{"67":1,"183":1}}],["expect",{"2":{"183":1}}],["expected",{"2":{"183":2}}],["expectations",{"2":{"142":1}}],["expects",{"2":{"136":1,"138":1,"167":1,"175":1,"183":1}}],["expertise",{"2":{"161":1}}],["expert",{"2":{"125":1,"127":1,"142":4,"144":1,"145":1,"147":1,"160":1,"161":2,"164":1,"167":1,"170":1,"175":1,"183":1}}],["experiencing",{"2":{"78":1}}],["experience",{"2":{"13":1}}],["experiences",{"2":{"13":1}}],["experiment",{"2":{"55":1,"96":1,"181":1,"183":1}}],["experimental",{"0":{"22":1,"180":1},"2":{"1":3,"11":4,"22":2,"48":2,"50":1,"51":3,"55":6,"56":3,"60":1,"63":1,"64":6,"66":1,"93":1,"106":2,"179":4,"180":5,"181":70,"182":281,"183":177}}],["expensive",{"2":{"3":1,"55":3,"181":6}}],["exploits",{"2":{"183":2}}],["exploitation",{"2":{"181":1}}],["exploration",{"2":{"181":1}}],["explorer",{"2":{"78":1}}],["explore",{"0":{"5":1},"2":{"5":1,"8":1,"41":1,"55":2,"58":1,"93":1,"181":2,"183":1}}],["explanatory",{"2":{"183":1}}],["explanations",{"2":{"0":1,"20":1,"141":1,"142":1}}],["explaining",{"2":{"167":1,"175":1}}],["explain",{"2":{"112":1,"130":1,"131":1,"144":1,"171":1,"182":1}}],["explains",{"2":{"99":1,"171":1}}],["explicit",{"2":{"64":2,"144":1,"145":1,"147":1,"182":2,"183":8}}],["explicitly",{"2":{"2":1,"14":1,"22":1,"24":1,"26":1,"42":1,"55":3,"90":1,"94":1,"124":1,"160":2,"168":1,"169":1,"180":1,"181":3,"183":5}}],["enforces",{"2":{"183":2}}],["enforce",{"2":{"183":14}}],["encapsulates",{"2":{"181":1}}],["encapsulated",{"2":{"181":1}}],["encoding",{"2":{"183":1}}],["encode",{"2":{"108":2,"183":7}}],["encoded",{"2":{"11":1,"104":1,"106":1,"183":3}}],["encouraging",{"2":{"160":1}}],["enclosed",{"2":{"131":1}}],["enclose"
,{"2":{"130":1}}],["enhance",{"2":{"127":1,"140":2,"150":1,"182":2}}],["enhancing",{"2":{"48":1,"140":1,"153":1,"183":1}}],["enigmatic",{"2":{"67":2,"183":2}}],["enough",{"2":{"67":2,"183":3}}],["ensuring",{"2":{"67":1,"150":1,"178":1,"183":1}}],["ensure",{"2":{"0":1,"55":2,"83":1,"93":3,"108":1,"127":2,"130":2,"142":2,"144":1,"145":1,"147":1,"152":1,"153":1,"160":1,"181":2,"182":4,"183":4}}],["ensures",{"2":{"0":1,"67":1,"183":1}}],["enabling",{"2":{"63":1,"181":1,"183":1}}],["enable",{"2":{"98":1,"108":1,"182":1,"183":2}}],["enables",{"2":{"11":1,"52":1,"64":2,"106":1,"182":8,"183":4}}],["enabled",{"2":{"11":1,"64":1,"106":1,"182":2}}],["enjoy",{"2":{"41":1}}],["en",{"2":{"30":1,"181":2}}],["engaging",{"2":{"171":1}}],["engagement",{"2":{"140":3}}],["engage",{"2":{"30":1}}],["english",{"2":{"64":1,"182":5}}],["engineer",{"2":{"150":1}}],["engineering",{"2":{"18":1,"150":1}}],["engine",{"2":{"49":1,"108":1,"114":2,"126":1,"178":1}}],["enumerated",{"2":{"183":2}}],["enumerates",{"2":{"182":4}}],["enumerate",{"2":{"64":1,"182":1}}],["enum",{"2":{"20":1,"93":2}}],["entire",{"2":{"183":15}}],["entity",{"2":{"126":1}}],["entities",{"2":{"13":1,"20":1,"67":1,"114":1,"126":1,"183":3}}],["entry",{"2":{"60":1,"64":1,"149":1,"182":1,"183":1}}],["entries",{"2":{"6":1,"108":2,"183":1}}],["enter",{"2":{"9":2}}],["end|>",{"2":{"183":2}}],["end>",{"2":{"183":1}}],["ending",{"2":{"183":1}}],["end=25",{"2":{"182":1}}],["ended",{"2":{"182":1,"183":1}}],["end`",{"2":{"130":1,"167":2,"168":1,"169":1,"175":2}}],["end",{"2":{"7":1,"20":3,"22":1,"31":1,"34":1,"46":1,"54":1,"55":11,"58":2,"60":1,"64":4,"67":1,"79":1,"93":6,"96":2,"108":5,"167":1,"175":1,"181":15,"182":12,"183":26}}],["endpoints",{"2":{"0":1,"183":1}}],["endpoint",{"2":{"0":2,"64":2,"138":5,"179":1,"182":6,"183":9}}],["environments",{"2":{"55":1,"181":1}}],["environment",{"0":{"84":1},"2":{"32":1,"49":1,"58":1,"69":2,"78":1,"83":1,"84":2,"98":1,"183":11}}],["env",{"2":{"0":1,"24":1,"26":1,"29":2,"30":1,"31":1,"69":2,"78":3,"83":1,"84":1,"182":1,"183":7}}],["eg",{"2":{"0":1,"2":1,"6":1,"7":2,"8":1,"9":1,"11":9,"13":2,"14":1,"16":2,"22":6,"24":2,"25":1,"27":2,"28":1,"37":1,"42":1,"54":5,"55":10,"57":1,"58":3,"60":6,"61":1,"62":1,"63":3,"64":15,"66":4,"67":3,"71":2,"72":1,"74":3,"79":3,"80":1,"82":2,"83":2,"84":2,"89":1,"90":2,"91":4,"93":2,"94":1,"100":3,"101":3,"102":1,"103":1,"104":3,"105":2,"106":10,"108":3,"114":1,"160":2,"167":1,"175":1,"180":2,"181":15,"182":40,"183":63}}],["hd",{"2":{"183":3}}],["hh",{"2":{"152":2,"153":3}}],["huggingface",{"2":{"182":6}}],["hundred",{"2":{"183":2}}],["hundredth",{"2":{"82":1}}],["hundreds",{"2":{"80":2}}],["humans",{"2":{"183":1}}],["human",{"2":{"18":1,"152":1,"153":1,"183":4}}],["href=",{"2":{"67":1,"182":1,"183":6}}],["htmlstyler",{"2":{"182":13,"183":1}}],["html",{"2":{"58":2,"182":15,"183":1}}],["https",{"2":{"21":1,"37":1,"67":2,"80":1,"108":2,"112":1,"179":1,"181":2,"182":7,"183":18}}],["http",{"2":{"11":2,"24":1,"28":1,"62":3,"64":2,"79":1,"106":2,"179":1,"182":5,"183":60}}],["hcat",{"2":{"46":1,"182":4,"183":3}}],["hit",{"2":{"182":2,"183":3}}],["his",{"2":{"94":2,"183":2}}],["history",{"2":{"55":1,"93":1,"103":1,"181":5,"182":1,"183":110}}],["hint",{"2":{"55":2,"181":2}}],["hints",{"2":{"55":1,"181":1}}],["hi",{"2":{"22":2,"23":1,"24":3,"26":2,"27":1,"28":1,"29":3,"30":1,"31":1,"34":3,"37":1,"39":1,"40":1,"42":2,"54":2,"55":2,"72":1,"92":3,"94":3,"97":6,"98":3,"160":1,"181":12,"183":24}}],["highly",{"2":{"121":2,"127":1,"138":1}}],["highlevel",{"2":{"55":2,"181":2}}],["highlighted",{"2":{"183":3}}]
,["highlights",{"0":{"49":1,"52":1,"57":1,"66":1}}],["highlighting",{"2":{"21":1,"58":1,"141":1,"142":1,"182":1,"183":2}}],["highlight",{"2":{"11":1,"57":1,"64":1,"130":1,"141":1,"152":1,"182":1}}],["higher",{"2":{"18":1,"55":1,"60":1,"64":2,"67":1,"181":3,"182":5,"183":8}}],["highest",{"2":{"7":1,"58":1,"182":1}}],["high",{"2":{"3":1,"9":3,"58":2,"60":2,"64":2,"105":1,"107":2,"157":1,"161":1,"163":1,"165":1,"170":1,"173":1,"182":6,"183":5}}],["hmm",{"2":{"13":1,"41":1,"183":1}}],["hyderephraser",{"2":{"182":3,"183":1}}],["hyde",{"2":{"63":1,"124":1,"125":1,"182":3}}],["hypothetical",{"2":{"8":1,"124":3,"125":2,"182":2}}],["hybrid",{"2":{"8":1,"63":1,"64":2,"182":4}}],["her",{"2":{"114":1}}],["here>",{"2":{"167":1,"175":1}}],["here",{"2":{"9":1,"24":1,"26":1,"29":1,"30":1,"31":2,"32":1,"34":1,"41":1,"42":1,"55":1,"60":1,"67":1,"89":1,"107":1,"181":1,"183":26}}],["hence",{"2":{"100":1,"183":3}}],["heals",{"2":{"182":1}}],["healing",{"2":{"93":1,"106":1}}],["heavy",{"2":{"108":1}}],["heavily",{"2":{"91":1,"114":1}}],["heavens",{"2":{"67":1,"183":1}}],["hear",{"2":{"67":2,"183":2}}],["hearty",{"2":{"94":2,"183":2}}],["heart",{"2":{"35":1}}],["header",{"2":{"183":3}}],["headers",{"2":{"2":1,"79":1,"91":1,"183":12}}],["headings",{"2":{"164":1}}],["headlines",{"2":{"160":1}}],["head",{"2":{"41":1}}],["he",{"2":{"20":1,"183":7}}],["height",{"2":{"20":2,"183":12}}],["held",{"2":{"64":1,"182":1}}],["hello",{"2":{"13":1,"15":1,"23":1,"24":1,"26":1,"30":1,"31":1,"39":1,"40":1,"42":1,"55":3,"67":3,"92":1,"181":1,"183":30}}],["helping",{"2":{"182":1}}],["helpful",{"2":{"20":1,"24":1,"26":1,"34":1,"48":1,"55":4,"92":1,"97":1,"99":1,"121":3,"122":1,"130":1,"141":2,"157":1,"161":1,"163":1,"170":1,"181":2,"183":8}}],["helpfulness",{"2":{"6":1,"121":1}}],["helps",{"2":{"20":1,"183":1}}],["help",{"2":{"13":1,"20":1,"22":1,"24":3,"26":3,"30":1,"31":1,"34":1,"35":1,"39":1,"40":1,"41":2,"42":1,"54":1,"64":1,"87":1,"108":1,"152":1,"182":1,"183":7}}],["helper",{"2":{"9":1,"11":1,"37":1,"106":1,"183":2}}],["horizontal",{"2":{"182":1}}],["holding",{"2":{"182":3}}],["hold",{"2":{"181":1}}],["holds",{"2":{"2":1,"64":1,"108":1,"182":3}}],["hope",{"2":{"56":1,"57":1,"93":1,"107":1,"117":1,"118":1}}],["honor",{"2":{"55":1,"181":1}}],["hosting",{"2":{"29":1,"89":1}}],["host",{"2":{"29":2,"183":11}}],["hosted",{"2":{"25":1,"64":4,"75":1,"83":1,"100":1,"101":1,"108":2,"182":4,"183":2}}],["hood",{"2":{"2":1,"19":1,"24":1,"26":1,"28":1,"52":1,"58":1,"99":1,"107":2,"108":1,"183":2}}],["however",{"2":{"3":2,"9":1,"16":1,"80":1,"182":1,"183":1}}],["how",{"0":{"82":1,"83":1,"92":1,"93":1,"94":1,"96":1,"97":1,"99":1},"1":{"100":1,"101":1,"102":1,"103":1,"104":1,"105":1,"106":1,"107":1,"108":1},"2":{"0":2,"7":1,"8":1,"9":4,"11":1,"12":1,"14":4,"22":1,"23":1,"24":2,"26":2,"30":1,"32":1,"36":1,"37":1,"54":1,"55":4,"58":7,"60":1,"64":2,"67":2,"76":4,"79":1,"90":1,"92":2,"93":3,"99":2,"103":1,"104":1,"107":1,"108":3,"121":4,"122":1,"140":3,"142":2,"167":1,"175":1,"181":5,"182":6,"183":32}}],["happy",{"2":{"183":1}}],["happened",{"2":{"131":1}}],["happens",{"2":{"100":1,"152":1}}],["happening",{"2":{"58":1}}],["haiku",{"2":{"183":4}}],["hamming",{"2":{"182":9,"183":1}}],["half",{"2":{"64":1,"96":1,"182":1}}],["hallucination",{"2":{"18":1}}],["hackable",{"2":{"60":1}}],["harder",{"2":{"64":2,"182":2}}],["hard",{"2":{"14":1,"20":1,"41":1,"71":1,"81":2,"181":1}}],["had",{"2":{"9":1,"183":1}}],["handling",{"2":{"55":2,"93":1,"181":1,"183":2}}],["handles",{"2":{"93":1,"182":1,"183":1}}],["handlebars",{"2":{"72":1}}],["handlebar",{"2":{"13":1
,"183":1}}],["handle",{"2":{"7":1,"58":1,"64":2,"93":2,"102":1,"108":1,"182":5,"183":10}}],["handcraft",{"2":{"3":1}}],["hash",{"2":{"182":1,"183":2}}],["hashed",{"2":{"64":5,"182":9,"183":1}}],["hasn",{"2":{"55":2,"181":2}}],["has",{"2":{"0":2,"16":1,"22":1,"28":1,"42":1,"55":4,"62":1,"79":2,"93":1,"117":1,"118":1,"130":1,"131":1,"142":1,"152":1,"181":2,"182":3,"183":23}}],["having",{"0":{"78":2},"2":{"0":1,"67":1,"105":1,"183":1}}],["have",{"0":{"92":1,"93":1,"95":1},"2":{"0":1,"5":1,"6":1,"7":6,"9":3,"11":2,"12":1,"13":5,"14":1,"16":1,"20":1,"22":2,"23":1,"24":5,"26":4,"27":1,"28":2,"30":1,"31":2,"34":1,"35":2,"36":1,"37":1,"41":4,"42":1,"52":2,"54":1,"55":1,"57":1,"58":1,"60":3,"64":4,"66":1,"69":1,"70":2,"79":2,"80":1,"82":1,"83":1,"90":1,"93":1,"94":2,"97":2,"102":2,"103":1,"105":3,"106":3,"108":4,"114":1,"115":1,"117":1,"118":1,"120":1,"125":1,"141":1,"159":1,"162":1,"164":2,"168":1,"169":1,"177":1,"181":3,"182":17,"183":32}}],["omit",{"2":{"154":1}}],["o1",{"2":{"83":7}}],["observability",{"2":{"183":2}}],["observe",{"2":{"60":1}}],["obj",{"2":{"108":3}}],["objective",{"2":{"125":1,"127":1}}],["objects",{"2":{"94":1,"100":1,"149":1,"182":2,"183":30}}],["object>",{"2":{"93":1}}],["object",{"2":{"6":1,"13":4,"22":2,"35":1,"54":2,"55":10,"63":1,"64":6,"71":1,"93":1,"98":1,"100":1,"101":1,"107":1,"108":12,"181":15,"182":10,"183":75}}],["obtained",{"2":{"183":1}}],["obtain",{"2":{"55":1,"64":2,"107":1,"181":1,"182":2}}],["ocean",{"2":{"67":4,"183":4}}],["occur",{"2":{"153":1}}],["occurrences",{"2":{"67":1,"183":1}}],["occurred",{"2":{"55":2,"181":3}}],["occursin",{"2":{"181":2}}],["occurs",{"2":{"55":1,"183":1}}],["ocrtask",{"0":{"178":1},"2":{"21":2,"183":4}}],["ocr",{"0":{"21":1},"2":{"21":1,"178":1,"183":4}}],["overrules",{"2":{"183":2}}],["overriden",{"2":{"183":1}}],["overrides",{"2":{"183":4}}],["override",{"2":{"55":2,"90":1,"91":1,"181":2}}],["overwrite",{"2":{"182":1,"183":1}}],["overwritten",{"2":{"55":1,"181":1}}],["overall",{"2":{"121":2,"140":1,"142":1,"183":3}}],["overarching",{"2":{"60":1}}],["overload",{"2":{"182":1}}],["overloaded",{"2":{"98":1}}],["overlaps",{"2":{"182":2}}],["overlapping",{"2":{"182":1}}],["overlap",{"2":{"64":1,"182":1}}],["overhead",{"2":{"28":1}}],["over",{"2":{"20":1,"114":1,"120":1,"152":1,"153":1,"156":1,"167":1,"168":1,"169":1,"175":1,"180":1,"183":12}}],["overview",{"0":{"11":1,"60":1,"106":1},"2":{"0":1,"80":1}}],["o",{"2":{"19":2,"93":4,"183":5}}],["olama",{"2":{"183":1}}],["oldest",{"2":{"183":1}}],["old",{"2":{"13":3}}],["ollamamanagedschema",{"2":{"23":1,"42":2,"183":12}}],["ollama",{"0":{"23":1,"37":1,"89":1},"1":{"38":1,"39":1,"40":1,"41":1,"42":1,"43":1,"44":1,"45":1,"46":1,"47":1},"2":{"0":2,"23":4,"28":1,"37":4,"42":1,"74":3,"75":1,"82":1,"88":1,"89":8,"90":2,"100":1,"101":1,"183":21}}],["ollamaschema",{"2":{"0":1,"37":1,"42":5,"47":1,"90":3,"183":3}}],["origin",{"2":{"183":1}}],["originated",{"2":{"183":3}}],["originally",{"2":{"183":2}}],["original",{"2":{"63":1,"67":4,"98":1,"117":6,"118":8,"121":1,"127":1,"128":1,"130":1,"178":1,"181":1,"182":11,"183":9}}],["oriented",{"2":{"159":2}}],["orientation",{"2":{"144":1,"145":1,"147":1}}],["oro1m",{"2":{"83":2}}],["oro1",{"2":{"83":2}}],["organization",{"2":{"140":1,"183":2}}],["organize",{"2":{"130":1,"152":2,"153":1}}],["org",{"2":{"67":1,"181":2,"183":1}}],["ordered",{"2":{"67":1,"183":2}}],["ordering",{"2":{"63":1,"181":4}}],["orders",{"2":{"21":1,"183":2}}],["order",{"2":{"6":1,"64":1,"67":1,"76":1,"112":1,"182":3,"183":1}}],["or",{"0":{"90":1,"91":1},"2":{"5":3,"6":3,"7":6,"8":1,"9
":3,"11":8,"12":3,"14":1,"18":2,"19":4,"20":1,"21":1,"22":4,"24":3,"26":3,"29":1,"30":1,"31":1,"34":1,"37":2,"39":1,"41":1,"42":2,"43":1,"46":1,"50":1,"52":2,"54":3,"55":23,"56":1,"57":1,"60":5,"61":3,"63":2,"64":20,"66":1,"67":8,"69":1,"71":1,"74":2,"76":1,"78":2,"83":4,"89":1,"90":1,"91":1,"93":3,"94":2,"98":1,"100":2,"101":2,"103":2,"104":2,"105":2,"106":8,"112":1,"114":1,"117":1,"118":1,"120":2,"126":3,"127":2,"130":4,"131":1,"137":1,"138":1,"140":2,"141":5,"142":6,"152":2,"153":4,"158":3,"160":3,"164":1,"167":2,"168":1,"169":1,"171":1,"175":2,"177":1,"179":1,"181":24,"182":51,"183":187}}],["our",{"2":{"3":1,"6":1,"7":2,"22":1,"37":1,"54":1,"55":3,"76":4,"79":2,"93":6,"96":1,"107":2,"108":3,"126":2,"171":1,"181":3,"182":3,"183":6}}],["outside",{"2":{"171":1}}],["outlined",{"2":{"144":1,"145":1,"147":1}}],["outline",{"2":{"130":1,"162":2,"164":2}}],["outcomes",{"2":{"55":1,"93":1,"181":2}}],["outcome",{"2":{"52":1,"140":6,"141":6,"142":6,"171":5,"181":3,"183":2}}],["outer",{"2":{"6":1,"7":4}}],["outerjoin",{"2":{"6":1}}],["output`",{"2":{"22":1,"54":1,"55":1,"181":1}}],["outputs",{"2":{"7":1,"9":1,"21":1,"22":2,"54":1,"55":3,"108":1,"183":23}}],["output",{"0":{"22":1},"2":{"6":2,"7":8,"11":3,"22":4,"31":1,"52":4,"54":9,"55":43,"58":2,"64":1,"67":4,"93":7,"94":2,"98":1,"104":3,"106":3,"107":1,"108":17,"112":1,"144":1,"145":1,"147":1,"150":1,"154":2,"158":1,"164":1,"181":32,"182":11,"183":65}}],["out",{"2":{"2":1,"11":1,"22":9,"54":5,"55":36,"56":1,"64":4,"76":1,"79":1,"96":1,"106":1,"107":1,"126":1,"128":1,"181":35,"182":7,"183":11}}],["own",{"2":{"2":1,"16":1,"55":1,"56":1,"64":3,"91":1,"181":1,"182":4,"183":6}}],["otherwise",{"2":{"55":2,"79":1,"181":4,"182":1,"183":22}}],["others",{"2":{"35":1,"108":1,"152":1}}],["other",{"0":{"24":1,"27":1},"2":{"2":1,"24":2,"27":1,"32":1,"35":1,"36":1,"52":1,"55":4,"58":1,"64":3,"67":1,"69":1,"74":1,"75":1,"78":1,"84":1,"90":1,"91":1,"94":2,"98":1,"101":1,"114":1,"138":1,"140":1,"153":2,"154":1,"166":1,"174":1,"181":4,"182":5,"183":13}}],["ops",{"2":{"182":1}}],["op",{"2":{"182":10,"183":1}}],["opposite",{"2":{"182":1}}],["opposed",{"2":{"12":1,"183":1}}],["opportunity",{"2":{"117":1,"118":1}}],["opt",{"2":{"76":1}}],["option",{"2":{"138":2,"182":3,"183":2}}],["options",{"2":{"55":1,"60":1,"64":1,"69":1,"79":1,"90":1,"181":1,"182":2}}],["options=",{"2":{"37":2}}],["optional",{"2":{"2":2,"11":3,"55":2,"58":1,"63":1,"64":1,"106":3,"181":2,"182":2,"183":44}}],["optionally",{"2":{"2":1,"64":1,"67":1,"94":1,"108":1,"182":2,"183":4}}],["optimized",{"2":{"127":2,"181":1}}],["optimizes",{"2":{"124":1}}],["optimize",{"2":{"22":1,"181":1}}],["operate",{"2":{"22":1,"54":1,"55":1,"64":1,"181":1,"182":1}}],["operations",{"2":{"15":1,"55":2,"64":2,"182":2,"183":4}}],["operation",{"2":{"7":7,"181":1,"182":4,"183":1}}],["opens",{"2":{"182":2}}],["openrouteropenaischema",{"2":{"183":2}}],["openrouter",{"2":{"83":6,"183":6}}],["opened",{"2":{"79":1}}],["opentagger",{"2":{"64":1,"182":4,"183":1}}],["openhermes2",{"2":{"23":3,"37":1,"40":1,"47":1,"89":2,"183":10}}],["open",{"0":{"88":1},"2":{"12":1,"24":1,"26":1,"55":1,"57":1,"58":1,"66":1,"78":1,"88":1,"89":1,"108":2,"181":1,"182":1,"183":1}}],["opening",{"2":{"9":1}}],["openaistream",{"2":{"183":3}}],["openaischema",{"2":{"0":2,"98":5,"101":1,"102":3,"107":3,"183":43}}],["openaiapi",{"0":{"78":1}}],["openai",{"0":{"24":1,"27":1,"74":1,"75":1,"76":1,"77":1,"78":1,"79":1,"81":1,"83":1},"1":{"75":1},"2":{"0":4,"11":1,"17":1,"22":1,"24":3,"25":1,"26":1,"27":2,"42":1,"54":1,"55":1,"69":9,"74":1,"76":6,"77":4,"78":3,"79":2,
"80":2,"81":3,"82":2,"83":1,"84":6,"85":2,"86":1,"91":3,"97":1,"100":2,"101":3,"102":3,"106":1,"107":6,"108":1,"114":1,"181":1,"182":1,"183":83}}],["ongoing",{"2":{"183":1}}],["online",{"2":{"69":1,"77":1,"183":4}}],["only",{"2":{"2":1,"7":8,"8":1,"9":5,"11":3,"13":1,"19":1,"22":6,"24":2,"27":2,"32":1,"42":1,"49":1,"52":2,"54":5,"55":15,"58":1,"63":1,"64":2,"67":2,"79":1,"90":1,"94":1,"97":2,"105":1,"106":4,"107":3,"108":2,"110":1,"112":1,"114":1,"117":3,"118":3,"120":1,"127":1,"130":1,"136":1,"138":1,"152":3,"153":1,"154":1,"157":1,"160":1,"161":1,"163":1,"165":1,"170":1,"171":1,"173":1,"181":23,"182":10,"183":75}}],["once",{"2":{"6":1,"7":1,"56":1,"89":1,"91":1,"96":1,"107":1,"162":1,"164":1,"183":7}}],["ones",{"2":{"9":1,"24":1,"27":1,"64":2,"75":1,"114":1,"182":2,"183":1}}],["one",{"0":{"5":1,"45":1},"2":{"5":1,"6":2,"9":1,"11":1,"13":1,"14":1,"16":1,"22":1,"25":1,"30":1,"31":1,"41":1,"42":1,"43":1,"45":1,"49":1,"54":1,"55":4,"58":1,"60":1,"62":1,"63":1,"64":1,"79":1,"82":2,"84":1,"93":2,"94":1,"105":1,"106":1,"131":1,"136":2,"138":2,"154":2,"160":1,"167":1,"175":1,"181":5,"182":10,"183":35}}],["on",{"2":{"0":1,"7":4,"9":2,"11":1,"12":1,"14":3,"18":2,"20":1,"22":3,"23":1,"24":2,"26":1,"30":1,"37":1,"42":1,"47":1,"54":3,"55":12,"57":1,"58":7,"60":1,"63":5,"64":8,"67":4,"69":1,"76":1,"77":1,"78":2,"79":1,"80":2,"81":1,"83":1,"84":2,"94":3,"105":1,"106":1,"108":6,"110":2,"112":4,"114":1,"117":2,"118":2,"120":1,"121":3,"122":3,"126":1,"130":3,"131":2,"132":1,"138":1,"140":3,"141":3,"142":3,"153":2,"158":2,"160":2,"164":1,"168":1,"169":1,"171":2,"177":1,"181":23,"182":18,"183":66}}],["office",{"2":{"160":1}}],["offloaded",{"2":{"37":1}}],["offload",{"2":{"28":1}}],["off",{"2":{"16":1,"84":1,"183":1}}],["offering",{"2":{"76":1}}],["offers",{"2":{"22":1}}],["offer",{"2":{"14":1,"16":1,"140":1,"142":1}}],["often",{"2":{"5":1,"6":1,"7":2,"12":1,"29":1,"64":2,"182":5,"183":21}}],["of",{"0":{"0":1,"54":1},"2":{"0":5,"2":5,"3":3,"4":1,"5":4,"6":5,"7":28,"8":1,"9":15,"11":7,"12":2,"13":3,"14":4,"15":1,"16":2,"17":1,"19":1,"20":4,"21":4,"22":5,"24":4,"26":4,"28":1,"29":1,"30":1,"31":3,"32":1,"35":1,"36":2,"37":1,"41":1,"42":1,"45":2,"46":1,"47":1,"50":4,"51":1,"52":8,"54":5,"55":34,"56":3,"57":7,"58":13,"60":7,"63":4,"64":59,"65":2,"66":4,"67":29,"70":2,"71":7,"72":6,"74":2,"76":2,"77":1,"78":2,"79":6,"80":3,"81":2,"82":4,"83":4,"88":1,"91":2,"93":5,"94":2,"96":5,"97":5,"98":2,"99":2,"100":3,"101":2,"102":3,"103":1,"104":2,"105":4,"106":7,"107":8,"108":15,"114":1,"120":4,"121":3,"126":1,"127":1,"130":10,"131":2,"136":2,"138":4,"140":3,"141":4,"142":3,"144":1,"152":8,"153":6,"154":2,"157":1,"158":2,"160":2,"161":1,"163":2,"165":2,"166":1,"167":7,"168":2,"169":1,"170":2,"171":4,"173":2,"174":1,"175":7,"177":3,"179":4,"181":75,"182":249,"183":429}}],["sse",{"2":{"183":2}}],["ss",{"2":{"152":2,"153":3}}],["sk",{"2":{"183":1}}],["skilled",{"2":{"160":1}}],["skips",{"2":{"94":1,"105":1,"182":2,"183":13}}],["skipped",{"2":{"55":1,"183":1}}],["skip",{"2":{"55":9,"64":5,"171":1,"181":2,"182":8,"183":7}}],["sky",{"2":{"67":1,"183":3}}],["svilupp",{"2":{"67":1,"182":1,"183":6}}],["slice",{"2":{"182":4}}],["slicing",{"2":{"168":1,"169":1}}],["sliding",{"2":{"182":3,"183":1}}],["slightly",{"2":{"67":1,"183":3}}],["slots",{"2":{"164":1}}],["slot",{"2":{"108":1}}],["slowly",{"2":{"72":1}}],["slow",{"2":{"72":1,"183":1}}],["sleep",{"2":{"79":1,"183":1}}],["slack",{"2":{"57":1,"66":1}}],["swiftly",{"2":{"67":1,"183":1}}],["switching",{"2":{"55":1,"181":1}}],["switch",{"2":{"12":2,"54":1}}],["swap",{"2":{"64":1,"182":
2}}],["src",{"2":{"63":1,"67":1,"91":1,"182":1,"183":7}}],["sqrt",{"2":{"181":1}}],["square",{"2":{"58":1,"64":2,"182":2}}],["sqlcoder",{"2":{"89":1}}],["sqlservercentral",{"2":{"21":1,"183":2}}],["sql",{"2":{"21":3,"183":6}}],["snippet",{"2":{"87":1,"124":1}}],["snippets",{"2":{"55":1,"168":1,"169":1,"182":7,"183":1}}],["snow",{"2":{"67":1,"183":1}}],["snowball",{"2":{"56":1,"182":1}}],["smart",{"2":{"183":1}}],["smarter",{"2":{"183":1}}],["smallint",{"2":{"93":5}}],["small",{"2":{"7":1,"12":1,"24":1,"26":1,"63":1,"152":1,"166":1,"169":1,"174":1,"181":2,"183":4}}],["smaller",{"2":{"2":1,"67":6,"108":1,"181":1,"183":7}}],["smoke",{"2":{"67":1,"183":1}}],["smith",{"2":{"114":2}}],["smiling",{"2":{"42":1}}],["smiles",{"2":{"40":1,"41":1}}],["smirks",{"2":{"41":1}}],["shiny",{"2":{"183":1}}],["shift",{"2":{"182":1}}],["shimmering",{"2":{"67":2,"183":2}}],["shell",{"2":{"170":1}}],["shapley",{"2":{"171":1}}],["shap",{"2":{"171":10}}],["sharegptschema",{"2":{"183":3}}],["sharegpt",{"2":{"96":1}}],["share",{"2":{"69":1,"76":1,"77":1}}],["shared",{"2":{"62":2,"64":2,"67":1,"97":1,"167":1,"175":1,"182":2,"183":4}}],["sharing",{"2":{"9":1}}],["shallow",{"2":{"55":1,"181":1}}],["shall",{"2":{"13":1}}],["shot",{"2":{"181":1}}],["shortcut",{"2":{"182":1}}],["shortcuts",{"2":{"130":1,"131":1,"132":1}}],["short",{"2":{"55":1,"58":1,"67":1,"114":1,"120":1,"122":1,"152":2,"158":1,"160":1,"181":1,"183":7}}],["shorter",{"2":{"29":1}}],["should",{"2":{"2":1,"7":1,"13":1,"23":1,"35":1,"41":1,"55":1,"60":1,"64":2,"67":1,"69":1,"84":1,"85":1,"89":2,"93":2,"98":1,"104":1,"108":2,"112":3,"120":1,"138":2,"152":4,"153":4,"154":7,"160":2,"164":1,"171":2,"177":2,"181":6,"182":2,"183":37}}],["showcase",{"2":{"150":1}}],["shows",{"2":{"9":1,"21":1,"60":1,"96":1,"183":4}}],["show",{"2":{"2":1,"7":1,"55":2,"89":1,"93":1,"97":1,"181":3,"183":1}}],["side",{"2":{"107":1}}],["sister",{"2":{"96":1}}],["sink",{"2":{"183":1}}],["since",{"2":{"94":1,"107":1,"152":1}}],["singletons",{"2":{"183":5}}],["single",{"2":{"67":2,"96":1,"182":4,"183":11}}],["situations",{"2":{"74":1}}],["silent",{"2":{"67":2,"183":2}}],["sibblings",{"2":{"55":2,"181":2}}],["size`",{"2":{"183":1}}],["size=8",{"2":{"182":1}}],["size",{"2":{"45":2,"46":2,"47":1,"64":2,"182":15,"183":5}}],["sizes",{"2":{"8":1,"64":2,"182":11}}],["sig",{"2":{"108":3}}],["significant",{"2":{"120":1,"153":2}}],["signing",{"2":{"77":1}}],["sign",{"2":{"49":1}}],["signatures",{"2":{"55":1,"181":1,"183":2}}],["signature",{"2":{"22":1,"54":1,"60":1,"61":4,"108":7,"183":21}}],["sigh",{"2":{"41":1}}],["simultaneously",{"2":{"22":1,"54":1,"55":1,"181":1}}],["similarly",{"2":{"72":1,"183":1}}],["similarity",{"2":{"2":2,"8":1,"47":2,"63":2,"64":5,"66":1,"67":1,"182":33,"183":1}}],["similar",{"2":{"2":1,"11":2,"55":1,"64":3,"67":1,"103":1,"106":2,"181":2,"182":6,"183":6}}],["simplistic",{"2":{"183":1}}],["simplification",{"2":{"107":1}}],["simply",{"2":{"2":1,"9":1,"11":1,"21":1,"28":1,"29":1,"36":1,"55":1,"64":1,"66":1,"69":1,"71":2,"93":2,"94":2,"106":1,"117":1,"118":1,"149":1,"181":1,"182":10,"183":6}}],["simplebm25retriever",{"2":{"182":4,"183":1}}],["simpleanswerer",{"2":{"64":2,"182":8,"183":1}}],["simplegenerator",{"2":{"64":2,"182":5,"183":1}}],["simplerefiner",{"2":{"64":1,"182":6,"183":1}}],["simpleretriever",{"2":{"64":5,"182":11,"183":1}}],["simplerephraser",{"2":{"62":1,"182":4,"183":1}}],["simplest",{"2":{"60":1,"64":1,"92":1,"182":1,"183":4}}],["simpleindexer",{"2":{"58":2,"64":3,"182":9,"183":1}}],["simple",{"0":{"1":1,"34":1,"39":1,"45":1},"1":{"2":1},"2":{"7":1,"8":1,"11
":1,"17":1,"23":1,"43":1,"47":1,"52":1,"55":1,"58":1,"79":2,"82":2,"89":1,"94":2,"106":1,"122":1,"134":1,"177":1,"181":1,"182":10,"183":15}}],["scene",{"2":{"183":1}}],["scenarios",{"2":{"55":1,"181":2,"182":1}}],["science",{"2":{"164":1,"171":1}}],["scientific",{"2":{"58":1}}],["scientist",{"2":{"9":2}}],["scans",{"2":{"183":3}}],["scanned",{"2":{"178":1}}],["scan",{"2":{"152":1,"153":1,"183":4}}],["scaled",{"2":{"182":1}}],["scale",{"2":{"58":1,"121":2,"122":2,"182":6,"183":1}}],["scoring=thompsonsampling",{"2":{"55":1,"181":1}}],["scoring",{"2":{"55":1,"121":1,"181":10,"182":1}}],["score==nothing",{"2":{"182":1}}],["scores2",{"2":{"182":3}}],["scores1",{"2":{"182":3}}],["scores=false",{"2":{"182":1}}],["scores",{"2":{"55":1,"58":1,"64":5,"121":1,"181":7,"182":28}}],["scored",{"2":{"22":1,"54":1,"55":1,"181":1}}],["scoreparametersstringstringstringsubstrin",{"2":{"7":1}}],["scoreretrieval",{"2":{"7":1}}],["score",{"2":{"6":3,"7":10,"11":2,"55":20,"58":1,"64":12,"121":2,"181":32,"182":35,"183":5}}],["scope",{"2":{"55":1,"181":1}}],["script",{"2":{"183":2}}],["scripting",{"2":{"170":2}}],["scratch",{"2":{"55":1,"183":2}}],["scratches",{"2":{"41":1}}],["screenshot",{"2":{"21":3,"178":1,"183":4}}],["scrollable",{"2":{"9":1}}],["schema=json3",{"2":{"108":2}}],["schema=openaischema",{"2":{"98":1,"183":1}}],["schema=myschema",{"2":{"91":1,"181":1}}],["schema=pt",{"2":{"90":1}}],["schema",{"0":{"42":1,"90":1},"2":{"0":5,"11":2,"23":5,"24":2,"26":1,"27":1,"29":1,"30":1,"31":1,"37":1,"39":1,"42":7,"45":2,"46":2,"47":2,"55":4,"90":5,"91":2,"97":2,"98":7,"101":1,"102":1,"106":2,"107":7,"108":5,"181":5,"183":205}}],["schemas",{"0":{"102":1},"2":{"0":1,"91":1,"98":1,"100":1,"102":2,"183":4}}],["satisfactory",{"2":{"141":2}}],["satisfy",{"2":{"130":1,"142":1}}],["saving",{"2":{"64":1,"182":2,"183":2}}],["saverschema",{"2":{"98":7,"183":16}}],["saves",{"2":{"55":1,"181":2,"182":1,"183":4}}],["saved",{"2":{"9":2,"12":1,"55":2,"64":1,"69":1,"94":1,"98":1,"105":1,"181":2,"182":4,"183":7}}],["save",{"2":{"2":2,"4":1,"7":1,"9":4,"14":1,"32":1,"55":2,"63":2,"69":1,"77":1,"82":2,"94":3,"96":3,"98":2,"108":1,"181":1,"183":29}}],["safety",{"2":{"67":1,"183":1}}],["safely",{"2":{"55":1,"183":1}}],["safe",{"2":{"55":4,"183":6}}],["sampling",{"2":{"181":5,"183":2}}],["samplenode",{"2":{"55":25,"181":45,"183":1}}],["sample",{"2":{"55":23,"181":58,"182":1,"183":11}}],["samples=1",{"2":{"55":2,"181":2}}],["samples=2`",{"2":{"22":1,"54":1,"55":1,"181":1}}],["samples",{"2":{"22":3,"52":3,"54":3,"55":33,"181":55,"183":4}}],["same",{"2":{"6":1,"11":2,"12":1,"22":2,"34":1,"52":3,"54":1,"55":2,"64":3,"67":2,"69":1,"79":1,"84":1,"93":2,"94":1,"104":1,"106":2,"107":1,"130":1,"131":1,"181":9,"182":18,"183":18}}],["salty",{"2":{"94":2,"183":2}}],["salt",{"2":{"20":2}}],["san",{"2":{"20":1}}],["says",{"2":{"80":1,"140":2,"141":2,"142":2}}],["say",{"2":{"13":1,"22":2,"23":1,"24":3,"26":2,"27":1,"28":1,"29":3,"30":1,"31":1,"34":2,"37":1,"39":1,"40":1,"42":2,"54":2,"55":2,"72":1,"92":1,"94":3,"97":6,"98":3,"107":1,"110":1,"112":1,"117":1,"118":1,"181":11,"183":22}}],["said",{"2":{"11":1,"52":1,"106":1,"183":2}}],["sonnet",{"2":{"183":1}}],["sort",{"2":{"183":1}}],["sorted",{"2":{"182":1}}],["sorry",{"2":{"183":3}}],["soft",{"2":{"81":2}}],["solve",{"2":{"166":1,"168":1,"169":1,"174":2}}],["solving",{"2":{"142":1,"166":2,"168":2,"169":2,"174":1}}],["solution",{"2":{"78":1,"130":2,"166":1,"168":1,"169":1,"174":1,"183":2}}],["solutions",{"2":{"69":1}}],["solid",{"2":{"67":1,"182":1,"183":6}}],["source=",{"2":{"182":1}}],["source2",{"2"
:{"64":1,"182":1}}],["source1",{"2":{"64":1,"182":1}}],["sourced",{"2":{"11":1}}],["source",{"0":{"88":1},"2":{"5":1,"6":1,"9":1,"11":1,"14":1,"24":1,"26":1,"50":1,"55":14,"58":1,"64":17,"67":7,"88":1,"94":1,"108":2,"110":1,"112":1,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"130":1,"131":1,"132":1,"134":1,"136":1,"137":1,"138":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":2,"149":1,"150":1,"152":1,"153":1,"154":1,"157":1,"158":1,"159":1,"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":2,"168":1,"169":1,"170":1,"171":1,"173":1,"174":1,"175":2,"177":1,"178":1,"179":2,"180":1,"181":38,"182":159,"183":193}}],["sources=false",{"2":{"182":1}}],["sources=map",{"2":{"58":1}}],["sources",{"2":{"4":2,"6":1,"58":3,"64":17,"182":50}}],["so",{"2":{"4":1,"7":1,"9":1,"16":2,"19":1,"22":4,"24":2,"26":2,"30":1,"31":2,"36":1,"42":1,"54":1,"55":1,"60":1,"64":3,"67":1,"78":1,"79":2,"80":1,"83":1,"85":1,"94":2,"103":1,"105":1,"107":3,"108":5,"114":1,"130":1,"131":1,"152":1,"180":1,"181":3,"182":4,"183":22}}],["sometimes",{"2":{"108":1,"118":1}}],["something",{"2":{"30":1,"31":1,"41":1,"42":1,"107":1,"160":2}}],["somewhere",{"2":{"105":1}}],["some",{"2":{"2":2,"7":1,"8":1,"9":2,"11":1,"21":2,"22":2,"23":1,"27":1,"28":1,"37":1,"42":2,"54":1,"55":1,"58":4,"60":3,"63":1,"64":2,"66":2,"67":1,"69":1,"83":3,"89":1,"91":1,"93":1,"103":1,"105":2,"106":1,"108":3,"117":2,"153":1,"162":1,"164":1,"166":1,"174":1,"177":1,"181":1,"182":12,"183":29}}],["sync",{"2":{"183":1}}],["synced",{"2":{"69":1,"77":1}}],["synthetic",{"2":{"182":2}}],["syntactically",{"2":{"142":1}}],["syntax",{"2":{"2":1,"9":4,"14":1,"21":1,"22":1,"54":1,"55":1,"79":1,"93":1,"94":1,"96":1,"108":1,"142":3,"160":1,"165":1,"168":3,"169":3,"173":1,"181":2,"183":8}}],["synonyms",{"2":{"126":2}}],["symphony",{"2":{"67":1,"183":1}}],["symbols",{"2":{"182":3,"183":3}}],["symbolic",{"2":{"97":1}}],["symbol=",{"2":{"64":1,"181":1,"182":1}}],["symbol",{"2":{"6":1,"9":3,"14":2,"58":1,"64":3,"94":2,"105":1,"181":6,"182":32,"183":54}}],["system+user",{"2":{"181":1}}],["systematic",{"2":{"166":1,"168":1,"169":1,"174":1}}],["system=",{"2":{"94":1,"98":1,"105":1,"183":3}}],["systems",{"2":{"11":1,"67":2,"106":1,"120":1,"183":5}}],["systemmessage",{"2":{"9":3,"13":3,"35":1,"41":1,"83":1,"92":1,"94":2,"97":3,"104":1,"105":2,"107":1,"183":13}}],["system",{"0":{"1":1,"60":1},"1":{"2":1},"2":{"3":1,"9":2,"13":1,"14":2,"18":2,"36":2,"55":1,"58":2,"60":2,"83":3,"84":1,"94":3,"97":3,"102":1,"104":1,"105":3,"107":1,"110":1,"112":1,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"130":1,"131":1,"132":1,"134":1,"136":1,"137":1,"138":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"149":5,"150":2,"152":1,"153":1,"154":1,"157":1,"158":1,"159":1,"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"171":1,"173":1,"174":1,"175":1,"177":1,"178":1,"181":2,"183":105}}],["steer",{"2":{"183":3}}],["steam",{"2":{"183":3}}],["stemmer",{"2":{"182":2}}],["stemmer=nothing",{"2":{"182":1}}],["stemming",{"2":{"182":1}}],["steroids",{"2":{"108":1}}],["step=4",{"2":{"182":1}}],["steps`",{"2":{"153":1}}],["steps",{"2":{"9":1,"58":3,"60":1,"63":1,"64":4,"106":1,"107":1,"130":4,"152":1,"153":14,"160":1,"166":1,"168":1,"169":1,"182":9,"183":2}}],["step",{"2":{"2":1,"8":1,"22":2,"54":2,"55":3,"56":1,"58":4,"60":7,"62":3,"63":2,"64":10,"93":1,"94":1,"97":2,"105":1,"107":4,"108":1,"117":1,"118":1,"124":1,"125":1,"127":1,"128":1,"130":6,"131":4,"132":2,"140"
:2,"141":2,"142":2,"144":2,"162":2,"164":2,"166":2,"174":2,"181":3,"182":21,"183":2}}],["stop",{"2":{"81":1,"182":1,"183":2}}],["stopwords",{"2":{"66":2,"182":4}}],["stood",{"2":{"67":1,"183":1}}],["storage",{"2":{"182":2,"183":1}}],["storyteller",{"2":{"171":1}}],["storytellerexplainshap",{"0":{"171":1}}],["storytelling",{"2":{"171":1}}],["story",{"2":{"67":4,"171":6,"183":4}}],["storing",{"2":{"64":1,"182":8}}],["store",{"2":{"92":1,"94":3,"183":17}}],["stored",{"2":{"55":2,"64":1,"181":2,"182":1}}],["stores",{"2":{"11":1,"52":1,"55":2,"106":1,"181":2,"182":3,"183":3}}],["stipple",{"2":{"58":1}}],["still",{"2":{"1":1,"58":1,"78":1,"183":2}}],["stylistic",{"2":{"140":2}}],["styling",{"2":{"58":1,"182":5}}],["styled",{"2":{"182":1}}],["styles=",{"2":{"182":3}}],["styles",{"2":{"182":10,"183":2}}],["styler=rt",{"2":{"182":4}}],["styler",{"2":{"182":21,"183":1}}],["style=",{"2":{"67":1,"182":1,"183":6}}],["style",{"2":{"2":1,"5":2,"6":1,"7":11,"64":1,"67":1,"72":1,"97":1,"140":2,"144":1,"145":1,"147":1,"150":1,"167":1,"171":1,"175":1,"182":12,"183":12}}],["stdout",{"2":{"55":12,"181":1,"183":23}}],["stub",{"2":{"183":1}}],["stumbled",{"2":{"35":1,"183":1}}],["study",{"2":{"152":1}}],["studying",{"2":{"5":1,"7":1,"153":1}}],["studies",{"2":{"58":1}}],["studied",{"2":{"5":1,"7":1}}],["studio",{"0":{"32":1},"1":{"33":1,"34":1,"35":1,"36":1},"2":{"32":2,"36":1,"183":2}}],["strong",{"2":{"183":4}}],["strongly",{"2":{"11":1}}],["streaming",{"2":{"183":17}}],["streamed",{"2":{"183":5}}],["streamchunk",{"2":{"183":8}}],["streamcallback",{"2":{"183":42}}],["stream",{"2":{"182":3,"183":18}}],["strength",{"2":{"182":2}}],["stranger",{"2":{"67":1,"183":1}}],["strategies",{"2":{"63":1}}],["strategy",{"2":{"2":1,"63":5,"64":2,"182":5,"183":2}}],["strict",{"2":{"140":1,"144":1,"145":1,"147":1,"183":19}}],["strictly",{"2":{"63":1,"120":1,"153":1,"167":1,"175":1}}],["strin",{"2":{"108":1}}],["string=",{"2":{"67":2,"183":3}}],["strings",{"2":{"7":1,"64":3,"66":3,"67":5,"182":16,"183":19}}],["string",{"0":{"40":1},"2":{"7":4,"9":8,"11":1,"14":5,"20":1,"22":1,"31":2,"34":1,"42":1,"54":1,"55":17,"58":6,"64":4,"66":6,"67":30,"71":2,"93":4,"94":5,"97":1,"98":1,"106":1,"107":2,"108":15,"159":1,"167":1,"168":1,"169":1,"175":1,"179":1,"181":16,"182":34,"183":275}}],["stripping",{"2":{"128":1}}],["strip",{"2":{"67":1,"126":1,"128":1,"183":1}}],["struggle",{"2":{"55":1,"181":1}}],["structural",{"2":{"140":1}}],["structures",{"2":{"150":1,"168":1,"169":1,"181":1}}],["structured",{"2":{"11":1,"20":1,"31":1,"60":1,"106":1,"108":1,"144":1,"145":1,"147":1,"183":8}}],["structure",{"2":{"11":1,"20":1,"55":4,"67":3,"106":1,"152":2,"153":3,"154":1,"178":1,"181":3,"182":2,"183":9}}],["structtypes",{"2":{"108":1}}],["structs",{"2":{"64":1,"104":1,"108":1,"182":1,"183":2}}],["struct",{"2":{"11":2,"20":4,"31":1,"52":2,"55":4,"58":1,"60":3,"64":1,"91":1,"93":5,"106":2,"108":7,"114":1,"181":4,"182":8,"183":48}}],["str",{"0":{"71":1},"2":{"16":1,"67":1,"71":2,"72":4,"182":1,"183":29}}],["stands",{"2":{"183":1}}],["standards",{"2":{"140":1}}],["standard",{"0":{"40":1},"2":{"55":2,"64":1,"168":1,"169":1,"182":7,"183":15}}],["stays",{"2":{"79":1,"120":1,"181":1}}],["stage",{"2":{"64":1,"97":1,"182":13}}],["stabilizes",{"2":{"56":1,"181":1}}],["stark",{"2":{"183":1}}],["stars",{"2":{"67":3,"183":3}}],["star",{"2":{"13":1,"35":1,"41":1,"183":5}}],["start|>assistant",{"2":{"183":1}}],["start|>user",{"2":{"183":1}}],["start>system",{"2":{"183":1}}],["start=1",{"2":{"182":1}}],["startup",{"2":{"87":2,"183":1}}],["starter",{"2":{"58":1}}],["sta
rted",{"0":{"68":1},"1":{"69":1,"70":1,"71":1,"72":1},"2":{"56":1,"74":1}}],["starting",{"2":{"55":1,"136":1,"138":1,"181":2,"183":3}}],["starts",{"2":{"55":1,"181":2,"183":2}}],["startswith",{"2":{"55":1,"181":1}}],["start",{"0":{"71":1},"2":{"1":1,"9":2,"20":1,"28":2,"69":2,"71":1,"81":1,"84":1,"99":1,"160":1,"181":1,"182":8,"183":2}}],["status",{"2":{"181":2,"183":23}}],["statistical",{"2":{"58":1}}],["statistics",{"2":{"1":1}}],["stats",{"2":{"55":23,"181":33}}],["stated",{"2":{"142":1}}],["stateless",{"2":{"103":1}}],["state",{"2":{"20":1,"55":2,"74":1,"108":1,"181":2,"183":1}}],["statements",{"2":{"93":1,"167":1,"175":1,"183":1}}],["statement",{"2":{"18":2,"82":1,"103":1,"104":1,"137":4,"138":1,"183":6}}],["states",{"2":{"6":1}}],["spillover",{"2":{"183":3}}],["spiders",{"2":{"183":1}}],["spider",{"2":{"19":1,"93":1,"183":1}}],["splatting",{"2":{"182":1}}],["spliter",{"2":{"95":1}}],["splitters",{"2":{"67":1,"183":1}}],["splitter",{"2":{"66":2,"67":10,"95":2,"182":1,"183":13}}],["splitting",{"2":{"64":2,"67":11,"182":3,"183":12}}],["splits",{"2":{"64":1,"67":1,"182":7,"183":1}}],["split",{"2":{"8":1,"22":2,"54":2,"55":1,"64":1,"66":3,"67":14,"95":3,"152":1,"160":1,"181":4,"182":4,"183":16}}],["speaking",{"2":{"98":1,"183":3}}],["speak",{"2":{"94":3,"171":1,"183":4}}],["spend",{"2":{"81":1}}],["spending",{"0":{"81":1},"2":{"71":1,"77":1,"81":1}}],["speeds",{"2":{"182":3}}],["speed",{"2":{"46":1}}],["spec",{"2":{"183":2}}],["specs",{"2":{"183":1}}],["specialist",{"2":{"136":1,"138":1}}],["specializes",{"2":{"154":1}}],["specialized",{"2":{"55":1,"124":1,"140":1,"141":1,"181":2,"183":1}}],["specializing",{"2":{"125":1,"127":1}}],["special",{"2":{"114":4,"115":2,"120":3,"152":2,"153":2,"156":2,"167":2,"168":2,"169":2,"171":2,"175":5,"182":2}}],["specifying",{"2":{"64":1,"182":1,"183":1}}],["specify",{"2":{"16":1,"34":1,"62":1,"64":2,"67":1,"90":2,"108":1,"130":1,"182":4,"183":19}}],["specified",{"2":{"63":1,"64":1,"67":2,"98":1,"140":1,"154":2,"181":3,"182":6,"183":12}}],["specifies",{"2":{"12":1,"55":1,"181":1,"183":1}}],["specification",{"2":{"102":1,"108":1,"183":1}}],["specifications",{"2":{"67":1,"108":1,"183":1}}],["specifically",{"2":{"0":1,"114":1,"183":2}}],["specific",{"2":{"0":2,"6":1,"9":1,"11":1,"14":2,"24":2,"26":2,"30":1,"31":1,"42":1,"57":1,"66":1,"67":2,"96":1,"105":1,"106":1,"108":2,"120":1,"124":2,"126":3,"130":2,"131":1,"140":1,"141":2,"142":3,"144":1,"145":1,"147":1,"153":2,"168":1,"169":1,"171":1,"182":1,"183":13}}],["spectacles",{"2":{"41":1}}],["span",{"2":{"182":1}}],["spanish",{"2":{"15":1}}],["sparrow",{"2":{"94":4,"183":5}}],["sparse",{"2":{"63":1,"182":5}}],["sparsearrays",{"2":{"1":2,"56":1,"182":4}}],["spain",{"2":{"71":2,"72":2}}],["spawn",{"2":{"46":1,"183":1}}],["spaces",{"2":{"182":3}}],["space",{"2":{"24":1,"26":1,"67":1,"183":3}}],["s",{"2":{"1":1,"2":2,"3":1,"4":1,"5":2,"6":2,"7":2,"9":6,"11":2,"12":2,"14":1,"16":1,"19":1,"20":4,"21":1,"22":1,"23":3,"32":2,"36":1,"37":1,"39":1,"40":1,"42":1,"43":1,"52":1,"54":1,"55":12,"56":1,"58":5,"60":2,"62":1,"64":8,"67":7,"74":1,"76":3,"79":1,"82":1,"83":2,"84":2,"89":3,"91":1,"92":5,"93":4,"94":3,"95":1,"97":1,"100":1,"102":2,"105":1,"106":2,"107":3,"108":14,"117":1,"118":1,"121":3,"122":2,"130":2,"131":1,"138":1,"140":8,"141":5,"142":11,"144":2,"145":2,"147":3,"150":1,"152":2,"153":1,"160":2,"162":2,"164":2,"167":3,"171":2,"175":1,"181":19,"182":24,"183":96}}],["seq",{"2":{"183":5}}],["sequentially",{"2":{"46":1,"55":2,"181":2,"183":1}}],["sequences",{"2":{"67":1,"182":1,"183":1}}],["sequence",{"2":{"22":2,"67"
:2,"103":1,"183":6}}],["sedan",{"2":{"183":1}}],["september",{"2":{"83":1}}],["separator=",{"2":{"67":1,"183":1}}],["separator",{"2":{"67":9,"181":1,"183":9}}],["separators",{"2":{"64":1,"67":19,"95":4,"182":7,"183":20}}],["separators=",{"2":{"60":1,"64":1,"67":4,"182":1,"183":4}}],["separated",{"2":{"181":1,"182":1,"183":1}}],["separate",{"2":{"2":1,"18":1,"56":1,"160":1,"164":1,"180":1,"181":2,"182":2,"183":3}}],["segments",{"2":{"67":2,"183":4}}],["segment",{"2":{"64":1,"182":1}}],["selects",{"2":{"181":1}}],["selecting",{"2":{"183":1}}],["selection",{"2":{"181":2}}],["selectively",{"2":{"55":1,"181":1}}],["select",{"2":{"58":1,"71":1,"114":1,"136":1,"138":3,"158":1,"181":7,"183":1}}],["selected",{"2":{"14":1,"28":1,"64":1,"97":1,"158":5,"182":1,"183":3}}],["self",{"2":{"51":1,"93":1,"106":1,"130":1,"181":1,"183":1}}],["sessions",{"2":{"85":1,"181":1,"183":2}}],["session",{"2":{"42":1,"55":1,"87":1,"181":7,"183":1}}],["several",{"2":{"23":1,"37":1,"91":1,"108":1,"130":1,"154":1,"160":2,"183":5}}],["seven",{"2":{"7":1}}],["secret",{"2":{"69":1,"77":1,"171":1}}],["secrets",{"2":{"67":4,"183":4}}],["sections",{"2":{"153":2,"160":4,"164":1,"183":2}}],["section",{"2":{"18":1,"23":1,"37":1,"56":1,"69":1,"72":1,"74":1,"77":1,"99":1,"152":2,"153":2,"160":1,"164":1}}],["seconds",{"2":{"11":1,"12":1,"21":2,"23":1,"24":1,"26":1,"30":1,"31":1,"55":3,"71":2,"72":1,"79":1,"92":1,"106":1,"181":2,"183":18}}],["second",{"2":{"7":4,"64":1,"67":3,"92":1,"107":3,"181":1,"182":4,"183":6}}],["sense",{"2":{"108":1,"183":1}}],["sensitive",{"2":{"23":1,"66":1,"89":1,"183":1}}],["sender",{"2":{"183":4}}],["sends",{"2":{"64":1,"179":1,"182":1}}],["send",{"2":{"55":1,"79":1,"92":1,"93":1,"102":1,"103":1,"107":1,"181":1,"183":5}}],["sending",{"2":{"24":1,"27":1,"66":1,"92":1,"94":2,"183":14}}],["senior",{"2":{"9":2}}],["sentences",{"2":{"58":3,"64":3,"66":3,"67":3,"95":1,"160":1,"171":1,"182":11,"183":4}}],["sentence",{"2":{"31":1,"58":1,"64":4,"66":1,"67":6,"82":1,"95":2,"108":3,"182":6,"183":6}}],["sentiment",{"2":{"18":1}}],["sent",{"0":{"97":1},"2":{"0":1,"23":1,"80":1,"89":1,"97":3,"102":1,"108":1,"182":3,"183":7}}],["seas",{"2":{"94":2,"183":2}}],["seats",{"2":{"93":2}}],["sea",{"2":{"67":1,"183":1}}],["searches",{"2":{"183":3}}],["searching",{"2":{"182":1,"183":2}}],["search",{"2":{"9":3,"12":1,"14":1,"17":1,"22":2,"49":1,"50":11,"52":1,"55":2,"58":4,"63":2,"64":4,"94":1,"112":2,"114":2,"115":2,"117":1,"118":12,"125":2,"126":3,"127":5,"179":12,"180":1,"181":4,"182":32,"183":8}}],["seamless",{"0":{"12":1},"2":{"0":1}}],["semantic",{"2":{"8":1,"17":1,"127":1,"182":3}}],["semi",{"2":{"6":1,"7":4}}],["semijoin",{"2":{"6":1}}],["setter",{"2":{"182":1}}],["settings",{"2":{"84":1,"181":1}}],["setting",{"0":{"81":1,"85":1},"2":{"55":1,"62":1,"95":1,"181":1,"183":1}}],["setpropertynested",{"2":{"64":2,"182":7,"183":1}}],["setup",{"0":{"89":1},"2":{"23":1,"69":1,"74":1,"84":1,"88":1,"167":1,"175":1,"181":1}}],["sets",{"2":{"4":1,"5":4,"6":2,"7":5,"64":1,"108":1,"120":1,"181":1,"182":5,"183":3}}],["set",{"0":{"7":1,"78":2},"2":{"3":2,"4":1,"7":5,"8":1,"9":1,"11":2,"13":1,"16":1,"19":1,"22":2,"24":1,"27":1,"29":2,"30":2,"31":2,"42":2,"49":1,"51":1,"54":2,"55":20,"56":1,"57":2,"58":1,"60":2,"64":5,"65":1,"66":1,"67":1,"69":5,"77":1,"78":2,"79":1,"81":2,"83":1,"84":5,"85":2,"90":1,"92":1,"93":1,"106":1,"108":2,"114":1,"127":1,"152":1,"153":1,"154":1,"167":1,"169":1,"171":1,"175":1,"181":13,"182":18,"183":56}}],["seem",{"2":{"183":1}}],["seems",{"2":{"183":2}}],["seel",{"2":{"55":1,"181":1}}],["seek",{"2":{"13":1,"35":1,"41
":1}}],["see",{"0":{"97":1},"2":{"2":3,"7":1,"8":3,"9":4,"11":4,"14":3,"18":1,"20":1,"22":1,"23":1,"24":3,"26":2,"32":1,"37":2,"50":1,"52":2,"55":11,"56":1,"58":4,"63":1,"64":9,"69":2,"72":1,"74":1,"75":1,"79":2,"80":1,"84":2,"85":2,"86":1,"88":1,"89":2,"90":1,"92":1,"93":1,"95":1,"97":4,"98":1,"99":1,"102":1,"105":2,"106":4,"108":8,"112":1,"179":1,"181":17,"182":40,"183":86}}],["serializable",{"2":{"183":4}}],["serialization",{"2":{"1":1,"94":1,"105":1}}],["serialized",{"2":{"98":1}}],["serializes",{"2":{"98":1}}],["series",{"2":{"67":1,"83":1,"183":2}}],["serves",{"2":{"58":1}}],["serve",{"2":{"35":1,"89":1}}],["serverpreference",{"2":{"183":1}}],["server`",{"2":{"183":1}}],["servers",{"2":{"183":1}}],["server",{"0":{"28":1},"2":{"0":2,"25":1,"28":4,"37":1,"183":16}}],["services",{"2":{"76":1}}],["service",{"2":{"24":1,"26":1,"76":1,"89":1}}],["sum",{"2":{"183":3}}],["summarizing",{"2":{"152":1,"153":1,"154":1}}],["summarize",{"2":{"130":1,"152":2,"153":1}}],["summary",{"2":{"67":1,"152":1,"153":5,"168":1,"169":1,"171":1,"177":1,"183":1}}],["sunny",{"2":{"183":10}}],["sunnweiwei",{"2":{"112":1,"182":1}}],["sun",{"2":{"182":3}}],["suitability",{"2":{"142":1}}],["suitable",{"2":{"23":1,"67":1,"138":1,"144":1,"145":1,"147":1,"158":1,"160":2,"183":2}}],["suggesting",{"2":{"183":1}}],["suggestion",{"2":{"141":1,"142":1}}],["suggestions",{"2":{"9":1,"140":5,"141":5,"142":5}}],["suggests",{"2":{"150":1}}],["suggested",{"2":{"130":1}}],["suggest",{"2":{"130":1,"131":1}}],["suffixed",{"2":{"183":1}}],["suffix",{"2":{"55":3,"183":5}}],["suffering",{"2":{"35":1,"183":2}}],["super",{"2":{"152":1,"153":1}}],["supertype",{"2":{"101":1}}],["superseded",{"2":{"42":1}}],["supplied",{"2":{"11":1,"52":1,"55":1,"106":1,"181":1}}],["suppose",{"2":{"5":1,"7":1}}],["supports",{"2":{"0":1,"182":2,"183":2}}],["support",{"2":{"0":1,"24":2,"27":1,"32":1,"57":1,"58":2,"64":11,"76":1,"83":1,"182":22,"183":13}}],["supported",{"2":{"0":1,"11":1,"43":1,"57":1,"64":2,"91":1,"108":2,"182":4,"183":13}}],["survey",{"2":{"154":5,"156":1}}],["surrounding",{"2":{"64":1,"182":7}}],["surface",{"2":{"14":1,"60":1,"158":1,"183":2}}],["sure",{"2":{"4":1,"7":1,"8":1,"37":2,"55":1,"64":1,"69":2,"84":2,"108":1,"138":1,"162":1,"164":1,"181":1,"182":5,"183":4}}],["subdocumenttermmatrix",{"2":{"182":2,"183":1}}],["subchunkindex",{"2":{"182":5,"183":1}}],["subcomponents",{"2":{"64":1,"182":1}}],["subject",{"2":{"160":2}}],["subheadings",{"2":{"152":2}}],["subheading",{"2":{"152":1}}],["submitted",{"2":{"76":1,"140":1}}],["sub",{"2":{"58":1,"60":1,"64":6,"82":1,"107":1,"154":1,"182":18,"183":4}}],["subseq",{"2":{"183":5}}],["subsequence",{"2":{"66":6,"67":17,"183":25}}],["subsequent",{"2":{"55":1,"67":1,"93":1,"181":3,"183":3}}],["subset",{"2":{"167":1,"175":1,"182":5}}],["substantial",{"2":{"153":1}}],["substring",{"2":{"58":2,"182":2,"183":5}}],["subarray",{"2":{"45":1}}],["subfolder",{"2":{"12":3}}],["subfolders",{"2":{"12":1}}],["subtype",{"2":{"0":1,"101":1}}],["subtypes",{"2":{"0":1,"55":2,"61":2,"64":3,"91":1,"102":2,"181":2,"182":4}}],["suceeding",{"2":{"182":1}}],["succinct",{"2":{"124":1}}],["successfully",{"2":{"55":2,"126":1,"181":1,"183":3}}],["successful",{"2":{"55":5,"93":1,"108":1,"115":1,"181":8,"183":2}}],["success",{"2":{"4":1,"55":7,"93":2,"181":13,"183":9}}],["succeeding",{"2":{"2":1}}],["such",{"2":{"0":1,"55":1,"98":1,"114":1,"140":3,"142":1,"158":1,"181":1,"182":2,"183":10}}],["iobuffer",{"2":{"182":1}}],["io",{"2":{"182":17,"183":24}}],["illustrated",{"2":{"155":1,"156":1}}],["illustrate",{"2":{"154":1,"161":1,"163
":1,"170":1}}],["i>macro",{"2":{"183":1}}],["i>method",{"2":{"182":1,"183":5}}],["i>",{"2":{"67":1,"182":1,"183":6}}],["i>function",{"2":{"67":1}}],["iphone",{"2":{"13":4,"35":1,"41":2,"183":7}}],["ignored",{"2":{"183":1}}],["ignores",{"2":{"183":8}}],["ignore",{"2":{"12":4,"171":1}}],["immediate",{"2":{"183":1}}],["immediately",{"2":{"94":1,"177":1,"182":2,"183":1}}],["im",{"2":{"183":2}}],["imagine",{"2":{"12":1,"82":1}}],["image",{"0":{"21":1},"2":{"11":2,"21":7,"43":6,"106":2,"177":7,"178":1,"183":89}}],["images",{"0":{"43":1},"2":{"11":2,"21":1,"42":1,"43":1,"50":2,"104":2,"106":2,"179":2,"183":19}}],["improper",{"2":{"142":1}}],["improving",{"2":{"15":1,"55":1,"141":1,"142":1,"181":1}}],["improved",{"2":{"130":1,"131":1}}],["improvements",{"2":{"130":2,"131":2,"140":1,"142":1}}],["improvement",{"2":{"76":1,"130":4,"140":1,"141":1}}],["improves",{"2":{"20":1}}],["improve",{"2":{"8":1,"22":1,"54":1,"55":1,"63":2,"76":3,"118":1,"124":1,"125":1,"126":1,"127":1,"128":1,"130":6,"132":1,"141":1,"181":2,"182":2,"183":2}}],["imprints",{"2":{"67":1,"183":1}}],["impact",{"2":{"58":1,"120":1,"140":2}}],["impartial",{"2":{"18":1,"121":2,"122":1,"137":1}}],["impermanence",{"2":{"13":1}}],["imported",{"2":{"180":1}}],["imports",{"2":{"55":2,"183":7}}],["important",{"2":{"8":1,"64":2,"114":2,"126":1,"152":2,"154":1,"162":1,"164":1,"182":2,"183":5}}],["import",{"2":{"1":1,"9":2,"22":1,"32":1,"37":1,"48":1,"51":1,"56":1,"66":1,"90":1,"182":1,"183":2}}],["implementing",{"2":{"183":1}}],["implement",{"2":{"54":1,"55":1,"58":1,"181":1,"183":1}}],["implements",{"2":{"52":1,"181":2}}],["implemented",{"2":{"0":1,"106":1,"182":4}}],["implementations",{"2":{"58":1,"61":1}}],["implementation",{"2":{"0":1,"58":1,"91":2,"112":1,"130":1,"131":2,"182":12,"183":7}}],["ie",{"2":{"9":1,"11":3,"55":4,"56":1,"63":1,"64":2,"66":1,"67":2,"79":1,"82":1,"94":2,"95":1,"104":1,"106":3,"108":1,"181":7,"182":16,"183":19}}],["irrelevant",{"2":{"8":1,"118":1}}],["idx",{"2":{"182":3}}],["idiomatic",{"2":{"130":1}}],["id`",{"2":{"55":1,"181":1}}],["ids",{"2":{"7":2,"136":1,"138":1,"182":3,"183":21}}],["id",{"2":{"7":11,"55":53,"58":1,"64":3,"98":2,"136":1,"181":69,"182":19,"183":77}}],["id=",{"2":{"7":2,"67":1,"182":1,"183":6}}],["idempotent",{"2":{"183":12}}],["identity",{"2":{"182":1,"183":5}}],["identifies",{"2":{"183":2}}],["identified",{"2":{"126":1,"154":1,"182":2}}],["identifiers",{"2":{"64":1,"112":1,"114":1,"182":1,"183":2}}],["identifier",{"2":{"64":1,"112":1,"114":1,"181":1,"182":6,"183":11}}],["identifying",{"2":{"183":2}}],["identify",{"2":{"114":1,"130":1,"141":1,"142":1,"153":1,"154":1}}],["identical",{"2":{"0":1,"66":1,"108":1,"182":1}}],["ideal",{"2":{"162":1,"183":2}}],["ideally",{"2":{"1":1,"2":1,"3":1,"158":2,"183":2}}],["ideas",{"2":{"8":1,"177":1}}],["idea",{"2":{"5":1,"14":1,"183":3}}],["i",{"0":{"75":1,"83":1,"96":1,"97":1},"2":{"2":1,"9":1,"12":1,"13":5,"14":4,"22":2,"23":1,"24":5,"26":5,"30":3,"31":5,"34":2,"35":2,"39":1,"40":1,"41":5,"42":1,"54":2,"55":3,"58":1,"67":2,"92":4,"93":2,"95":1,"97":5,"107":1,"108":7,"115":1,"130":2,"131":2,"132":2,"181":10,"182":6,"183":50}}],["if",{"0":{"75":1,"83":1},"2":{"1":1,"2":1,"9":4,"11":5,"14":1,"17":1,"18":1,"19":1,"20":1,"21":1,"22":2,"23":1,"24":2,"26":2,"27":1,"28":1,"32":1,"37":1,"39":1,"42":2,"52":2,"54":3,"55":40,"57":2,"58":2,"60":1,"62":2,"63":3,"64":21,"66":1,"67":8,"71":1,"72":1,"78":5,"79":5,"80":2,"82":2,"89":1,"90":1,"91":2,"92":1,"93":6,"94":5,"98":3,"102":1,"106":4,"108":6,"110":1,"114":2,"115":2,"117":5,"118":6,"120":2,"130":3,"131":1,"137":1,"138":2
,"140":2,"141":3,"142":3,"152":4,"153":2,"154":1,"156":1,"160":2,"167":3,"168":1,"169":2,"171":2,"174":1,"175":3,"181":47,"182":60,"183":228}}],["inactive",{"2":{"183":2}}],["inactived",{"2":{"183":1}}],["inanimate",{"2":{"35":1}}],["inherit",{"2":{"182":1}}],["inefficiencies",{"2":{"142":1}}],["inline",{"2":{"130":3}}],["initializes",{"2":{"183":1}}],["initialized",{"2":{"183":11}}],["initialize",{"2":{"183":14}}],["initialisms",{"2":{"126":1}}],["initiate",{"2":{"55":1,"181":1,"183":1}}],["injects",{"2":{"126":1}}],["injected",{"2":{"112":1,"182":1}}],["inject",{"2":{"71":1,"183":1}}],["involve",{"2":{"183":2}}],["involved",{"2":{"0":1}}],["inverse",{"2":{"182":2}}],["investigating",{"2":{"182":3}}],["investigate",{"2":{"58":1}}],["invalid",{"2":{"55":2,"64":2,"182":2,"183":2}}],["infinitely",{"2":{"183":2}}],["inferred",{"2":{"183":1}}],["inferfaces",{"2":{"183":1}}],["influential",{"2":{"171":1}}],["influence",{"2":{"55":1,"108":1,"181":1}}],["informal",{"2":{"160":1}}],["informative",{"2":{"152":1,"153":1,"164":1}}],["information",{"2":{"0":2,"6":1,"11":1,"18":1,"20":1,"24":3,"26":3,"29":1,"30":1,"50":1,"52":1,"55":2,"56":1,"60":1,"64":4,"76":1,"79":1,"80":1,"86":1,"88":1,"89":1,"90":1,"95":1,"97":1,"100":1,"103":1,"104":2,"106":1,"108":1,"110":2,"117":2,"118":1,"120":4,"121":1,"122":1,"126":2,"128":1,"144":1,"145":1,"147":1,"152":1,"153":1,"160":1,"168":1,"169":1,"171":1,"179":1,"181":5,"182":6,"183":69}}],["informed",{"2":{"6":1}}],["info",{"2":{"4":1,"7":2,"11":1,"12":2,"21":2,"23":1,"24":1,"26":1,"30":1,"31":1,"55":10,"58":1,"67":2,"71":2,"72":1,"92":2,"93":2,"106":1,"108":1,"181":10,"183":13}}],["inplace",{"2":{"181":6}}],["inplace=true",{"2":{"55":1,"181":1}}],["input=",{"2":{"108":2}}],["input2",{"2":{"67":3,"183":3}}],["input1",{"2":{"67":3,"183":3}}],["inputclassifier",{"0":{"136":1},"2":{"19":1,"93":1,"183":3}}],["inputs",{"2":{"11":2,"22":1,"52":3,"54":1,"94":1,"100":1,"102":1,"105":1,"106":3,"108":1,"167":1,"175":1,"182":3,"183":6}}],["input",{"2":{"11":2,"19":4,"64":1,"93":2,"106":2,"108":3,"127":1,"136":7,"138":3,"171":1,"182":18,"183":23}}],["inches",{"2":{"183":1}}],["incredible",{"2":{"152":1}}],["increase",{"2":{"52":1,"81":1,"181":1,"183":3}}],["incorporating",{"2":{"182":2}}],["incorrect",{"2":{"142":1}}],["inconsistencies",{"2":{"141":1}}],["inconsistent",{"2":{"121":1}}],["incomplete",{"2":{"121":1,"183":3}}],["including",{"2":{"11":1,"18":1,"25":1,"52":1,"89":1,"108":1,"171":1,"181":2,"182":3,"183":20}}],["includes",{"2":{"7":1,"64":2,"88":1,"181":2,"182":6,"183":1}}],["included",{"2":{"7":1,"131":1,"153":1,"180":1,"182":2,"183":5}}],["include",{"2":{"2":1,"7":1,"11":1,"50":9,"58":2,"64":1,"106":1,"124":1,"125":1,"126":1,"142":1,"152":2,"153":1,"167":1,"175":1,"179":9,"182":9,"183":14}}],["indentation",{"2":{"182":1}}],["independent",{"2":{"181":1}}],["index>",{"2":{"182":1}}],["indexing",{"2":{"64":1,"168":1,"169":1,"182":1}}],["indexes",{"2":{"182":7}}],["indexed",{"2":{"63":2,"64":1,"182":1}}],["indexer",{"2":{"61":1,"64":12,"182":13}}],["index",{"2":{"2":16,"3":1,"4":2,"6":1,"7":2,"8":3,"11":1,"57":5,"58":11,"60":5,"61":5,"62":2,"63":5,"64":54,"182":205,"183":2}}],["industry",{"2":{"126":1}}],["indifferent",{"2":{"67":1,"183":1}}],["individual",{"2":{"55":2,"80":1,"121":1,"181":2,"182":2,"183":1}}],["indication",{"2":{"154":1}}],["indicating",{"2":{"64":1,"138":2,"181":1,"182":13,"183":21}}],["indicate",{"2":{"140":1,"142":1,"181":2}}],["indicated",{"2":{"112":1}}],["indicates",{"2":{"55":3,"171":1,"181":4,"183":1}}],["indices",{"2":{"8":1,"64":1,"182":23,"18
3":2}}],["inserted",{"2":{"183":2}}],["insert",{"2":{"183":1}}],["inserting",{"2":{"182":1}}],["insufficient",{"2":{"80":1}}],["inside",{"2":{"55":2,"183":3}}],["insights",{"2":{"6":1,"152":5}}],["inspired",{"2":{"22":1,"55":1,"124":1,"125":1,"171":1,"181":1,"183":3}}],["inspect",{"2":{"14":1,"55":2,"181":1,"183":5}}],["instructor",{"2":{"183":3}}],["instruction",{"2":{"182":3,"183":1}}],["instructions>",{"2":{"175":4}}],["instructions=",{"2":{"64":1,"114":1,"115":1,"120":1,"152":1,"153":1,"154":1,"167":1,"169":1,"171":1,"175":1,"182":2}}],["instructions",{"2":{"4":1,"36":1,"64":2,"103":1,"105":1,"110":1,"114":9,"115":5,"117":1,"118":1,"120":9,"121":1,"130":3,"136":1,"138":1,"140":6,"141":1,"142":2,"144":1,"145":1,"147":1,"152":7,"153":6,"154":3,"156":4,"158":2,"160":2,"164":1,"167":7,"168":4,"169":6,"171":9,"175":6,"177":1,"182":2,"183":1}}],["instruct",{"2":{"28":1,"108":1}}],["installation",{"0":{"70":1},"2":{"89":1}}],["installated",{"2":{"37":1}}],["installing",{"2":{"55":1,"183":2}}],["installed",{"2":{"23":1,"70":2,"89":1}}],["instant",{"0":{"87":1}}],["instantiating",{"2":{"182":1}}],["instantiation",{"2":{"55":2,"183":2}}],["instantiated",{"2":{"11":1,"52":1,"106":1,"183":1}}],["instances",{"2":{"183":2}}],["instance",{"2":{"11":2,"20":1,"52":2,"55":2,"60":1,"106":2,"171":3,"181":18,"183":5}}],["instead",{"2":{"9":1,"16":1,"67":1,"95":1,"130":1,"131":1,"171":1,"182":2,"183":8}}],["innerjoin",{"2":{"7":2}}],["inner",{"2":{"6":1,"7":5,"98":1}}],["int32",{"2":{"183":1}}],["intricate",{"2":{"183":5}}],["intro",{"2":{"67":2}}],["introduced",{"2":{"42":1}}],["introduction",{"0":{"48":1,"51":1,"56":1},"1":{"49":1,"50":1,"52":1,"53":1,"54":1,"55":1,"57":1,"58":1,"59":1,"60":1,"61":1,"62":1,"63":1,"64":1},"2":{"5":1,"131":1}}],["int=60",{"2":{"182":1}}],["int=3",{"2":{"182":1}}],["int=32000",{"2":{"181":1}}],["int=35000",{"2":{"67":4,"183":4}}],["int=1",{"2":{"181":1}}],["int=512",{"2":{"55":1,"181":1}}],["int64",{"2":{"7":3,"9":1,"14":1,"45":1,"58":4,"94":1,"183":3}}],["int",{"2":{"7":1,"20":2,"55":18,"64":2,"67":2,"93":5,"181":31,"182":38,"183":49}}],["into",{"0":{"12":1},"2":{"2":3,"11":1,"18":1,"19":1,"35":1,"45":1,"55":1,"56":1,"58":1,"63":1,"64":7,"66":6,"67":6,"70":1,"87":1,"94":1,"97":2,"106":1,"108":4,"126":1,"128":1,"152":2,"153":2,"154":2,"181":2,"182":25,"183":32}}],["intelligent",{"2":{"112":1}}],["intelligence",{"2":{"17":1}}],["intent",{"2":{"127":1}}],["intention",{"2":{"60":1,"180":1}}],["intended",{"2":{"55":1,"63":2,"125":1,"127":1,"140":4,"181":2,"182":1,"183":2}}],["intends",{"2":{"36":1}}],["integrity",{"2":{"140":1}}],["integrates",{"2":{"58":1,"181":1}}],["integration",{"0":{"12":1},"2":{"0":2,"167":1,"175":1}}],["integer=1",{"2":{"55":1,"181":2}}],["integer",{"2":{"50":1,"64":10,"93":1,"179":1,"182":33,"183":15}}],["integers",{"2":{"7":1,"182":1,"183":3}}],["intersection",{"2":{"182":1}}],["interpolate",{"2":{"183":1}}],["interpolated",{"2":{"183":1}}],["interpolation",{"0":{"40":1},"2":{"71":1,"168":1,"169":1,"183":6}}],["interprets",{"2":{"150":1}}],["interested",{"2":{"108":1,"183":1}}],["interesting",{"2":{"41":1}}],["internally",{"2":{"64":1,"182":1}}],["internal",{"2":{"60":1,"64":1,"91":1,"160":3,"182":1,"183":1}}],["interface",{"0":{"59":1},"1":{"60":1,"61":1,"62":1,"63":1},"2":{"56":1,"63":1,"183":5}}],["intermediate",{"2":{"6":1,"58":2}}],["interaction",{"2":{"181":2}}],["interactions",{"2":{"171":1,"181":5,"183":2}}],["interactive",{"2":{"55":1,"58":1,"181":2}}],["interact",{"2":{"1":1,"11":1,"21":1,"52":1,"55":2,"101":1,"106":1,"181":2}}],["in",{"0":{"2"
:1,"86":1},"2":{"0":1,"1":2,"2":9,"3":1,"4":2,"6":3,"7":24,"8":4,"9":15,"11":8,"12":6,"13":1,"14":3,"15":1,"16":3,"18":1,"19":1,"20":4,"21":4,"22":5,"23":2,"24":3,"26":2,"28":3,"30":2,"31":2,"32":3,"35":1,"36":1,"37":1,"39":1,"41":1,"42":2,"49":1,"50":4,"52":1,"54":3,"55":54,"57":4,"58":15,"60":6,"62":1,"63":3,"64":23,"65":1,"66":4,"67":7,"69":4,"71":4,"72":1,"74":1,"76":2,"78":6,"79":3,"80":1,"81":1,"82":1,"83":2,"84":4,"85":2,"87":1,"89":2,"91":2,"92":4,"93":4,"94":7,"96":2,"97":1,"98":1,"105":2,"106":8,"107":7,"108":16,"112":2,"114":3,"120":4,"121":3,"124":1,"125":2,"126":4,"127":1,"128":1,"130":7,"131":4,"132":1,"136":1,"138":1,"140":4,"141":2,"142":4,"144":4,"145":3,"147":2,"152":2,"153":2,"154":3,"157":1,"158":1,"160":7,"161":3,"163":2,"164":2,"165":1,"166":2,"167":1,"170":3,"173":1,"174":5,"175":3,"178":1,"179":4,"180":2,"181":65,"182":134,"183":268}}],["itr2",{"2":{"67":2,"183":2}}],["itr1",{"2":{"67":2,"183":2}}],["iters",{"2":{"183":1}}],["iterative",{"2":{"181":1,"183":1}}],["iteratively",{"2":{"67":2,"132":1,"181":2,"183":2}}],["iterating",{"2":{"181":1}}],["iterations",{"2":{"131":1}}],["iteration",{"2":{"67":1,"181":2,"183":1}}],["iterates",{"2":{"183":1}}],["iterate",{"2":{"55":1,"181":1}}],["itemsextract",{"2":{"183":10}}],["items",{"2":{"7":2,"8":1,"60":1,"64":2,"108":1,"114":2,"126":1,"182":10,"183":13}}],["item",{"2":{"4":1,"5":1,"6":1,"7":3,"55":1,"64":1,"181":1,"182":11,"183":1}}],["itself",{"2":{"20":1,"54":1,"64":1,"67":1,"182":3,"183":4}}],["its",{"2":{"9":1,"11":2,"13":1,"22":2,"32":1,"52":3,"54":2,"55":2,"58":2,"62":1,"64":4,"67":4,"90":1,"94":1,"95":1,"98":1,"101":1,"104":1,"106":2,"117":1,"118":1,"120":1,"127":1,"140":2,"141":1,"153":3,"164":1,"168":1,"169":1,"171":3,"177":1,"181":6,"182":11,"183":12}}],["it",{"0":{"82":2,"99":1},"1":{"100":1,"101":1,"102":1,"103":1,"104":1,"105":1,"106":1,"107":1,"108":1},"2":{"0":4,"2":4,"4":2,"5":1,"7":1,"8":2,"9":11,"11":14,"12":4,"13":4,"14":4,"16":1,"18":1,"19":3,"20":2,"22":8,"23":3,"24":2,"26":1,"28":5,"29":3,"30":3,"31":4,"32":3,"36":1,"37":1,"39":1,"40":1,"41":3,"42":4,"43":1,"49":1,"52":5,"54":6,"55":33,"56":2,"57":1,"58":2,"60":1,"64":17,"66":1,"67":14,"69":6,"71":1,"74":1,"77":2,"78":4,"79":3,"80":2,"81":1,"82":2,"83":1,"84":6,"85":1,"89":4,"90":2,"92":4,"93":8,"94":13,"95":1,"96":2,"97":2,"98":2,"99":3,"100":1,"101":1,"102":3,"103":2,"104":1,"105":4,"106":14,"107":8,"108":17,"117":2,"118":2,"120":1,"121":1,"124":1,"125":2,"126":1,"127":2,"128":2,"130":5,"131":5,"132":1,"136":1,"137":3,"138":2,"140":3,"142":2,"144":1,"150":1,"152":3,"154":2,"156":1,"160":3,"162":2,"164":2,"166":1,"167":4,"168":1,"169":1,"171":2,"174":1,"175":4,"177":1,"180":2,"181":63,"182":82,"183":224}}],["isolate",{"2":{"183":1}}],["istracermessage",{"2":{"183":2}}],["isextracted",{"2":{"183":4}}],["isn",{"2":{"108":1,"117":2,"183":3}}],["isnothing",{"2":{"7":1,"55":1,"93":1,"181":1,"182":1,"183":1}}],["issues",{"2":{"130":2,"142":1}}],["issue",{"2":{"57":1,"64":1,"66":1,"78":1,"130":2,"182":4}}],["islowercase",{"2":{"55":1,"181":1}}],["isvalid",{"2":{"22":1,"54":1,"55":4,"181":1,"183":4}}],["isa",{"2":{"11":4,"55":2,"93":1,"98":3,"106":4,"108":1,"181":2,"183":4}}],["is",{"0":{"82":1,"97":2},"2":{"0":7,"1":2,"2":4,"3":1,"5":2,"6":5,"7":18,"8":1,"9":5,"11":11,"12":3,"13":1,"14":4,"16":1,"18":4,"20":4,"22":9,"23":2,"28":5,"29":1,"30":3,"31":5,"32":1,"35":1,"36":1,"37":2,"39":1,"40":2,"41":4,"42":2,"43":1,"47":1,"48":1,"49":1,"50":9,"51":1,"52":6,"54":8,"55":44,"56":2,"57":2,"58":2,"60":4,"62":2,"63":7,"64":53,"67":21,"71":8,"72":5,"76":2,"78":5,"79"
:3,"80":1,"82":3,"83":2,"84":1,"88":2,"89":2,"91":2,"92":4,"93":5,"94":1,"96":1,"97":2,"98":1,"99":2,"101":3,"103":1,"104":2,"105":3,"106":13,"107":14,"108":16,"115":1,"117":3,"118":2,"120":3,"121":5,"124":1,"125":1,"126":3,"127":4,"128":2,"130":2,"131":2,"136":1,"137":3,"138":1,"140":1,"141":3,"142":2,"144":1,"145":1,"150":1,"152":1,"153":3,"157":1,"158":1,"159":1,"160":3,"161":1,"162":2,"163":1,"164":3,"165":1,"166":3,"167":2,"168":1,"169":1,"170":1,"171":1,"173":2,"174":4,"175":3,"177":1,"179":9,"180":5,"181":66,"182":173,"183":276}}],["dtm",{"2":{"182":2}}],["dynamically",{"2":{"183":2}}],["dynamic",{"2":{"181":1}}],["duplicates",{"2":{"182":1}}],["duplication",{"2":{"64":1,"182":1}}],["due",{"2":{"55":1,"183":2}}],["during",{"2":{"7":1,"55":3,"181":2,"182":1,"183":3}}],["drawn",{"2":{"182":5}}],["draft",{"2":{"160":1}}],["drafts",{"2":{"160":1}}],["drafteremailbrief",{"0":{"160":1}}],["driven",{"2":{"183":1}}],["drives",{"2":{"181":1}}],["drive",{"2":{"108":1}}],["dry",{"2":{"97":4,"183":26}}],["drops",{"2":{"55":1,"181":1}}],["dr",{"2":{"39":1,"114":2}}],["d",{"2":{"30":1,"31":1,"82":1,"95":1,"182":2,"183":1}}],["dllama",{"2":{"29":3}}],["dspy",{"2":{"22":1,"55":1,"181":1}}],["datetime",{"2":{"183":4}}],["date",{"2":{"114":1,"183":2}}],["dates",{"2":{"114":1}}],["datatype",{"2":{"183":2}}],["data=",{"2":{"183":1}}],["datadeps",{"2":{"182":2}}],["data>",{"2":{"144":4,"145":4,"174":4}}],["datamessage",{"2":{"11":2,"45":2,"46":1,"47":1,"104":1,"106":2,"183":14}}],["dataframerowsourcecontextquestionanswerretrieval",{"2":{"7":1}}],["dataframe",{"2":{"7":11,"9":1,"14":1,"183":2}}],["dataframeswhat",{"2":{"7":1}}],["dataframesmeta",{"2":{"1":1,"2":1}}],["dataframes",{"2":{"1":1,"2":3,"5":1,"9":1,"14":1,"114":2,"182":1,"183":2}}],["dataset",{"2":{"6":1,"96":2,"171":1}}],["database",{"2":{"2":1,"5":2,"6":1,"7":11,"126":2}}],["databricks",{"0":{"29":1,"91":1},"2":{"0":1,"29":9,"64":1,"91":1,"182":1,"183":16}}],["databricksopenaischema",{"2":{"0":1,"29":2,"91":2,"183":3}}],["data",{"0":{"20":1,"76":1},"2":{"0":2,"2":4,"5":5,"6":4,"7":34,"8":2,"9":6,"11":1,"20":3,"23":1,"45":1,"55":9,"58":4,"60":3,"63":1,"76":7,"89":1,"104":1,"106":1,"108":2,"114":1,"144":6,"145":6,"147":6,"149":1,"159":5,"164":1,"166":6,"168":1,"171":1,"174":4,"181":18,"182":3,"183":22}}],["damaging",{"2":{"95":1}}],["day",{"2":{"82":1}}],["dashboard",{"2":{"81":1,"182":1}}],["dashboards",{"2":{"58":1}}],["dance",{"2":{"67":1,"183":1}}],["danced",{"2":{"67":1,"183":1}}],["dangerous",{"2":{"35":1,"183":1}}],["darkness",{"2":{"35":1}}],["daphodil",{"2":{"19":1,"183":1}}],["dall",{"2":{"11":1,"106":1,"183":5}}],["diagnostics",{"2":{"182":1}}],["diagram",{"0":{"61":1},"1":{"62":1},"2":{"60":1}}],["dimensionality",{"2":{"182":4}}],["dimension",{"2":{"182":10}}],["diligent",{"2":{"159":1}}],["dilemma",{"2":{"13":1}}],["dir",{"2":{"98":2,"183":19}}],["direction",{"2":{"183":3}}],["direct",{"2":{"154":1}}],["directly",{"2":{"55":1,"62":2,"64":4,"67":1,"81":1,"91":1,"94":2,"105":1,"120":2,"125":1,"127":1,"182":8,"183":8}}],["directory",{"2":{"9":3,"98":1,"183":11}}],["diverse",{"2":{"127":1}}],["divisible",{"2":{"182":1}}],["division",{"2":{"67":1,"183":1}}],["divides",{"2":{"63":1}}],["div",{"2":{"67":1,"182":1,"183":6}}],["div>",{"2":{"67":1,"182":1,"183":6}}],["digits",{"2":{"55":2,"93":1,"181":2}}],["digits=1",{"2":{"7":1}}],["differs",{"2":{"108":1}}],["differ",{"2":{"11":1,"106":1,"183":1}}],["differences",{"2":{"183":3}}],["difference",{"2":{"6":1,"7":2,"67":1,"106":1,"182":1,"183":3}}],["differently",{"2":{"11":1,"52":1,"93":1,"1
06":1,"183":1}}],["different",{"2":{"6":1,"7":2,"23":1,"55":4,"64":1,"66":1,"94":1,"130":2,"160":2,"167":1,"168":1,"169":1,"175":1,"181":5,"182":8,"183":10}}],["didn",{"2":{"181":1}}],["did",{"2":{"9":1,"108":1}}],["disables",{"2":{"183":1}}],["disable",{"2":{"83":1,"183":1}}],["disabled",{"2":{"64":1,"182":1}}],["disney",{"2":{"67":2,"183":2}}],["disk",{"2":{"63":1,"94":2,"105":1,"182":1,"183":4}}],["distinct",{"2":{"153":1,"154":1}}],["distinguished",{"2":{"168":1,"169":1}}],["distinguish",{"2":{"67":1,"183":1}}],["dist",{"2":{"67":6,"183":6}}],["distributed",{"2":{"181":1}}],["distributing",{"2":{"171":1}}],["distributions",{"2":{"170":1}}],["distribution",{"2":{"58":1,"181":2}}],["distract",{"2":{"41":1}}],["distraction",{"2":{"12":1}}],["distances",{"2":{"67":1,"183":1}}],["distance",{"2":{"8":1,"17":2,"66":3,"67":12,"182":10,"183":15}}],["discrimination",{"2":{"183":2}}],["discrepancies",{"2":{"140":1}}],["discrete",{"2":{"11":1,"106":1}}],["discounted",{"2":{"182":2}}],["discovery",{"2":{"120":1}}],["discovered",{"2":{"120":2}}],["discover",{"2":{"11":1,"41":1,"58":3,"61":1,"64":2,"105":1,"106":1,"181":1,"182":2}}],["discussed",{"2":{"153":2}}],["discuss",{"2":{"30":1,"31":1}}],["discussions",{"2":{"153":1}}],["discussion",{"2":{"12":1,"183":1}}],["displayed",{"2":{"183":1}}],["displaysize",{"2":{"182":2,"183":1}}],["display",{"2":{"9":1,"14":1,"58":1,"64":1,"92":1,"182":1,"183":2}}],["dispatching",{"2":{"60":1,"67":1,"182":32,"183":1}}],["dispatches",{"2":{"55":1,"58":1,"64":1,"181":1,"182":1}}],["dispatched",{"2":{"55":1,"181":1}}],["dispatch",{"2":{"9":1,"14":1,"42":1,"60":2,"61":4,"62":2,"64":1,"67":2,"102":1,"105":1,"168":2,"169":2,"181":1,"182":3,"183":6}}],["dicts",{"2":{"183":1}}],["dict=parameters",{"2":{"182":1}}],["dict=dict",{"2":{"182":1}}],["dictates",{"2":{"64":1,"182":3}}],["dictionaries",{"2":{"42":1,"102":1}}],["dictionary",{"2":{"16":1,"55":1,"58":1,"168":1,"169":1,"182":1,"183":21}}],["dict",{"2":{"6":4,"7":2,"58":1,"93":1,"97":3,"98":1,"107":3,"108":9,"182":13,"183":43}}],["doing",{"2":{"183":1}}],["dollar",{"2":{"96":1}}],["dolphin",{"2":{"37":1}}],["domluna",{"2":{"182":1}}],["domain",{"2":{"126":1,"141":1}}],["domains",{"2":{"50":4,"179":4,"182":6}}],["dominating",{"2":{"24":1,"26":1}}],["dot",{"2":{"17":1,"183":2}}],["double",{"2":{"12":1,"76":1,"85":1,"168":1,"169":1,"183":1}}],["doewhat",{"2":{"7":1}}],["doe",{"2":{"7":6}}],["doesn",{"2":{"183":2}}],["does",{"0":{"82":1},"2":{"2":1,"7":1,"11":1,"36":1,"41":1,"52":1,"55":2,"64":1,"76":2,"83":3,"93":1,"95":1,"97":1,"106":1,"121":2,"130":1,"131":1,"181":1,"182":7,"183":16}}],["don",{"2":{"6":1,"8":1,"9":1,"37":1,"39":1,"103":1,"108":1,"110":3,"114":1,"115":1,"117":3,"118":3,"120":1,"152":2,"153":1,"154":1,"167":1,"169":1,"171":1,"175":1,"182":2,"183":9}}],["done",{"2":{"2":1,"7":1,"55":1,"58":1,"64":1,"90":1,"107":1,"108":1,"140":3,"141":3,"142":3,"181":2,"182":1,"183":10}}],["downstream",{"2":{"71":1,"93":1}}],["downloads",{"2":{"43":1,"183":6}}],["downloaded",{"2":{"23":1}}],["download",{"2":{"9":1,"11":1,"37":1,"43":2,"89":2,"106":1,"182":1,"183":10}}],["down",{"2":{"2":1,"60":1,"67":1,"131":1,"183":1}}],["do",{"0":{"8":1,"95":1},"2":{"2":1,"6":1,"7":4,"9":1,"11":1,"12":2,"13":3,"14":3,"20":2,"21":1,"22":2,"24":1,"26":1,"34":1,"35":1,"36":1,"41":1,"46":1,"54":2,"55":6,"60":2,"67":1,"69":2,"77":2,"78":2,"79":1,"83":1,"87":1,"92":1,"93":2,"94":1,"95":1,"106":1,"108":6,"112":1,"118":1,"130":2,"131":2,"132":1,"152":3,"153":1,"167":1,"171":1,"175":1,"181":6,"182":2,"183":36}}],["doc9",{"2":{"58":1}}],["doc2",{"2
":{"58":1}}],["doc5",{"2":{"58":1}}],["doc15",{"2":{"58":1}}],["doc8",{"2":{"58":1}}],["doc$i",{"2":{"58":1}}],["doc",{"2":{"46":2,"64":4,"182":4,"183":5}}],["doctor1",{"2":{"7":1}}],["doctorwhat",{"2":{"7":2}}],["doctor",{"2":{"7":2}}],["documenttermmatrix",{"2":{"182":6,"183":1}}],["documented",{"2":{"60":1}}],["document",{"0":{"45":1},"2":{"2":1,"7":1,"11":1,"45":1,"61":1,"64":9,"114":1,"182":21,"183":5}}],["documents",{"0":{"46":1},"2":{"2":1,"7":1,"46":1,"57":2,"58":2,"64":3,"67":2,"140":1,"182":16,"183":6}}],["documentation",{"2":{"1":1,"20":1,"32":1,"50":1,"58":1,"64":1,"69":1,"77":1,"102":1,"108":1,"114":1,"124":1,"179":1,"182":1,"183":10}}],["docstring",{"2":{"85":1,"95":1,"108":1,"182":1,"183":12}}],["docstrings",{"2":{"20":1,"60":1,"147":1,"183":1}}],["docs",{"2":{"2":2,"9":1,"22":1,"46":1,"61":1,"64":6,"75":1,"80":2,"108":6,"182":33,"183":13}}],["dplyr",{"2":{"2":3}}],["degrees",{"2":{"183":16}}],["denote",{"2":{"152":1}}],["declaration",{"2":{"183":4}}],["declarations",{"2":{"142":1}}],["decoded",{"2":{"183":1}}],["decodes",{"2":{"183":1}}],["decode",{"2":{"108":1,"183":4}}],["decision",{"2":{"153":9}}],["decisions",{"2":{"6":1,"153":5}}],["decides",{"2":{"182":1}}],["decide",{"2":{"18":1,"19":1,"137":1}}],["deduplicate",{"2":{"64":1,"182":1}}],["dedicated",{"2":{"1":1,"24":1,"26":1,"106":1,"153":1}}],["deviations",{"2":{"142":1}}],["device",{"2":{"41":1}}],["dev",{"2":{"67":1,"183":1}}],["developers",{"2":{"183":1}}],["developing",{"2":{"58":1}}],["development",{"2":{"58":1,"120":1}}],["deem",{"2":{"126":1}}],["deemed",{"2":{"55":1,"183":1}}],["deepseek",{"2":{"183":6}}],["deepseekopenaischema",{"2":{"183":2}}],["deepdive",{"0":{"63":1}}],["deeper",{"2":{"62":2}}],["deep",{"2":{"41":1,"131":1,"132":1,"161":1,"163":1,"170":1,"174":1}}],["democards",{"2":{"67":1,"183":1}}],["demonstrate",{"2":{"55":1,"181":1}}],["demanding",{"2":{"28":1}}],["depot",{"2":{"78":1}}],["depth",{"2":{"50":3,"179":3}}],["depend",{"2":{"183":3}}],["depends",{"2":{"14":1}}],["depending",{"2":{"11":1,"64":2,"80":1,"106":1,"164":1,"182":2,"183":1}}],["dependencies",{"2":{"9":1,"56":1}}],["delim",{"2":{"183":2}}],["delicious",{"2":{"31":2,"108":8}}],["dels",{"2":{"63":1}}],["delay=2",{"2":{"55":1,"181":1}}],["delay",{"2":{"22":1,"54":1,"55":5,"181":7,"183":2}}],["delete",{"2":{"2":1,"4":1,"78":1}}],["defauls",{"2":{"183":1}}],["defaults",{"2":{"55":10,"56":1,"60":1,"63":1,"64":19,"67":4,"181":17,"182":30,"183":95}}],["default",{"0":{"90":1},"2":{"17":1,"37":1,"42":1,"50":7,"55":2,"58":3,"60":1,"64":41,"67":1,"71":1,"90":2,"106":1,"107":1,"108":1,"179":7,"181":1,"182":103,"183":74}}],["def2",{"2":{"182":1}}],["def",{"2":{"182":7}}],["defining",{"2":{"60":3}}],["definitions",{"2":{"183":2}}],["definition",{"2":{"55":1,"130":1,"171":7,"183":2}}],["defines",{"2":{"182":4,"183":5}}],["defined",{"0":{"19":1},"2":{"19":1,"55":1,"62":1,"63":1,"104":1,"105":2,"153":1,"162":1,"181":1,"182":3,"183":22}}],["define",{"2":{"2":1,"9":1,"11":1,"20":2,"37":1,"42":1,"55":1,"64":3,"93":2,"106":1,"107":1,"108":2,"140":1,"181":1,"182":3,"183":16}}],["deferring",{"2":{"55":1,"181":1}}],["deferred",{"2":{"11":1,"52":2,"55":2,"106":1,"181":3}}],["defer",{"2":{"18":1}}],["destination",{"2":{"183":1}}],["descending",{"2":{"112":1}}],["descriptive",{"2":{"152":2,"153":1}}],["description=>",{"2":{"183":2}}],["description=sig",{"2":{"108":2}}],["descriptions",{"2":{"19":1,"183":32}}],["description",{"0":{"155":1,"156":1},"1":{"157":1,"158":1,"159":1,"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":
1,"171":1,"172":1,"173":1,"174":1,"175":1},"2":{"9":4,"14":1,"19":1,"42":1,"94":2,"108":10,"110":1,"112":1,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"130":1,"131":1,"132":1,"134":1,"136":1,"137":1,"138":1,"140":1,"141":1,"142":1,"144":2,"145":2,"147":1,"149":1,"150":1,"152":3,"153":1,"154":1,"157":1,"158":1,"159":1,"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"171":5,"173":1,"174":1,"175":1,"177":1,"178":1,"183":48}}],["describes",{"2":{"136":1,"138":1,"158":1}}],["described",{"2":{"126":1,"140":1}}],["describe",{"2":{"21":2,"43":1,"152":1,"153":2,"166":1,"168":1,"174":1,"183":6}}],["despite",{"0":{"78":2},"2":{"183":1}}],["desired",{"2":{"66":2,"140":1,"142":1,"181":1}}],["designed",{"2":{"0":1,"11":4,"52":1,"55":2,"56":1,"60":2,"94":1,"106":5,"108":1,"181":2,"183":5}}],["deserialize",{"2":{"2":1,"183":1}}],["debugging",{"2":{"99":1,"182":1,"183":18}}],["debug",{"2":{"2":1,"64":1,"182":1,"183":1}}],["determining",{"2":{"183":1}}],["determines",{"2":{"100":1,"183":1}}],["determine",{"2":{"0":1,"4":1,"7":1,"158":1}}],["detects",{"2":{"183":3}}],["detected",{"2":{"183":1}}],["detect",{"2":{"183":2}}],["detachment",{"2":{"13":2}}],["detail=",{"2":{"183":1}}],["detailorientedtask",{"0":{"159":1}}],["detail",{"2":{"64":1,"85":1,"130":1,"131":1,"144":1,"145":1,"147":1,"159":2,"182":1,"183":14}}],["details",{"2":{"2":1,"9":1,"11":2,"37":1,"52":1,"55":2,"63":1,"64":9,"80":1,"105":2,"106":2,"108":2,"124":1,"125":1,"128":1,"144":1,"145":1,"147":1,"171":1,"181":8,"182":30,"183":12}}],["detailed",{"2":{"0":1,"55":2,"120":1,"140":1,"181":4}}],["aaazzam",{"2":{"183":1}}],["aai",{"2":{"12":2,"72":3,"183":11}}],["axolotl",{"2":{"96":1}}],["azureopenaischema",{"2":{"183":4}}],["azure",{"0":{"91":1},"2":{"183":17}}],["a>",{"2":{"67":1,"182":1,"183":6}}],["away",{"2":{"55":1,"67":1,"78":1,"181":1,"183":1}}],["awareness",{"2":{"64":1,"182":2}}],["aware",{"2":{"52":1,"57":1,"66":1}}],["amount",{"2":{"183":1}}],["among",{"2":{"171":1}}],["ambiguities",{"2":{"141":1}}],["amazing",{"2":{"102":1}}],["amazingly",{"2":{"23":1}}],["am",{"2":{"31":1,"41":1,"108":1,"183":6}}],["amp",{"0":{"4":1,"5":1,"6":1},"2":{"2":1,"3":1,"6":1,"7":1,"64":4,"182":4}}],["acronyms",{"2":{"126":1}}],["across",{"2":{"55":1,"62":2,"64":2,"85":1,"108":1,"181":2,"182":5,"183":2}}],["achievable",{"2":{"67":1,"183":1}}],["achieve",{"2":{"34":1,"67":3,"87":1,"93":1,"181":1,"183":3}}],["acts",{"2":{"183":1}}],["action",{"2":{"153":1,"181":2}}],["actionable",{"2":{"140":1,"141":1,"142":1,"150":1,"153":1}}],["active",{"2":{"55":13,"181":17}}],["act",{"2":{"92":1,"97":1,"154":1,"183":3}}],["actually",{"2":{"28":1,"36":2,"130":1,"131":1,"171":1,"183":1}}],["actual",{"2":{"9":2,"55":1,"130":1,"131":1,"132":1,"171":1,"181":2}}],["accumulate",{"2":{"182":1}}],["accuracy",{"2":{"58":1,"140":1,"144":1,"145":1,"147":1,"182":5}}],["accurately",{"2":{"142":1,"150":1,"158":1,"178":1}}],["accurate",{"2":{"24":1,"26":1,"183":1}}],["account",{"2":{"69":3,"77":2,"79":1,"80":2,"81":1,"83":1}}],["according",{"2":{"67":1,"183":2}}],["accesor",{"2":{"182":1}}],["accesses",{"2":{"183":1}}],["accessed",{"2":{"94":1,"183":1}}],["accessible",{"2":{"164":1,"183":1}}],["accessing",{"2":{"77":1,"183":11}}],["accessors",{"2":{"182":1}}],["accessor",{"2":{"55":4,"181":2,"183":2}}],["access",{"0":{"75":1,"87":1},"2":{"22":1,"28":1,"31":1,"45":1,"51":1,"52":1,"54":1,"55":3,"56":1,"69":1,"78":1,"83":2,"87":2,"89":1,"91":1,"93":1,"100":1,"101":1,"110":1,"117":1,"118":1,"181":5,"182":
7,"183":25}}],["accepts",{"2":{"55":3,"108":1,"181":4}}],["accept",{"2":{"55":3,"181":4,"182":1}}],["ah",{"2":{"13":1,"183":1}}],["agreements",{"2":{"160":1}}],["agreed",{"2":{"153":2}}],["agnostic",{"2":{"97":1}}],["agents",{"2":{"51":1,"182":3}}],["agentic",{"2":{"51":1,"55":1,"106":1,"180":1,"181":2,"183":2}}],["agent",{"0":{"22":1,"51":1},"1":{"52":1,"53":1,"54":1,"55":1},"2":{"22":1,"55":1,"181":2,"183":1}}],["agenttools",{"0":{"181":1},"2":{"11":3,"22":1,"51":3,"55":6,"66":1,"93":1,"106":2,"180":2,"181":69,"183":35}}],["age",{"2":{"20":2,"183":13}}],["against",{"2":{"58":1,"64":1,"182":1,"183":1}}],["again",{"2":{"12":1,"94":1,"183":4}}],["administrator",{"2":{"170":1}}],["adhere",{"2":{"183":3}}],["adheres",{"2":{"142":1}}],["adherence",{"2":{"140":2,"144":1,"145":1,"147":1}}],["adapted",{"2":{"117":1,"118":1,"125":1,"127":1,"128":1}}],["adapt",{"2":{"96":1}}],["advisable",{"2":{"182":1}}],["advice",{"2":{"37":1,"67":2,"183":2}}],["advantages",{"2":{"58":1}}],["advancements",{"2":{"58":1}}],["advance",{"2":{"55":1,"181":1}}],["advancedgenerator",{"2":{"64":1,"182":3,"183":1}}],["advancedretriever",{"2":{"62":3,"64":4,"182":7,"183":1}}],["advanced",{"0":{"13":1,"35":1,"41":1},"2":{"50":2,"51":1,"64":3,"99":1,"114":1,"179":2,"182":4,"183":1}}],["adjectives",{"2":{"31":2,"108":8}}],["adjustments",{"2":{"140":2,"142":1}}],["adjusts",{"2":{"39":1,"41":1,"183":1}}],["adjust",{"2":{"9":1,"52":1,"171":1}}],["addresses",{"2":{"142":1}}],["addressed",{"2":{"130":1,"141":1}}],["address",{"2":{"130":1,"140":1,"171":1,"183":5}}],["addded",{"2":{"64":1,"182":1}}],["adding",{"2":{"64":1,"66":1,"83":1,"91":1,"108":1,"142":1,"153":1,"182":1,"183":3}}],["additionalproperties",{"2":{"183":1}}],["additional",{"2":{"49":1,"55":1,"60":1,"64":13,"71":1,"117":1,"118":1,"126":2,"152":1,"181":4,"182":22,"183":46}}],["addition",{"2":{"11":2,"19":1,"31":1,"60":1,"64":1,"106":1,"167":4,"175":4,"182":2,"183":2}}],["adds",{"2":{"12":1,"181":1,"182":3,"183":2}}],["added",{"2":{"9":1,"37":1,"55":1,"64":1,"67":2,"181":1,"182":5,"183":5}}],["add",{"2":{"8":3,"9":7,"12":1,"14":2,"21":1,"32":1,"39":1,"42":1,"47":1,"54":1,"55":3,"64":9,"70":1,"83":1,"84":1,"87":2,"91":1,"94":2,"97":1,"108":2,"126":1,"152":1,"181":9,"182":34,"183":18}}],["affection",{"2":{"183":1}}],["affects",{"2":{"8":1}}],["after",{"2":{"9":1,"55":5,"64":2,"67":3,"71":1,"79":1,"93":1,"94":1,"152":1,"153":1,"167":1,"175":1,"181":3,"182":4,"183":11}}],["auditing",{"2":{"183":2}}],["audience",{"2":{"140":5,"162":5,"164":2,"171":2}}],["authorization",{"2":{"183":2}}],["authentication",{"2":{"183":3}}],["auth",{"2":{"183":2}}],["auto",{"2":{"55":5,"140":1,"141":1,"142":1,"181":1,"183":17}}],["automatic",{"0":{"54":1,"98":1},"2":{"11":1,"52":1,"55":1,"83":1,"106":1,"108":1,"183":1}}],["automatically",{"2":{"9":1,"18":1,"52":2,"55":2,"57":1,"58":1,"63":1,"78":1,"84":1,"98":6,"108":3,"182":1,"183":16}}],["augment",{"2":{"126":1}}],["augmented",{"0":{"1":1},"1":{"2":1},"2":{"1":1,"56":1,"64":2,"180":1,"182":3}}],["avg",{"2":{"7":2}}],["average",{"2":{"7":1,"64":1,"121":1,"182":3}}],["available",{"2":{"6":1,"9":4,"11":1,"14":2,"24":2,"26":2,"32":2,"61":1,"63":2,"64":4,"78":1,"89":2,"91":1,"105":1,"106":1,"108":1,"136":1,"138":1,"152":1,"182":7,"183":24}}],["avoiding",{"2":{"58":1,"120":1}}],["avoided",{"2":{"12":1}}],["avoid",{"2":{"2":1,"42":1,"55":2,"64":1,"67":1,"72":1,"79":1,"130":1,"131":1,"168":1,"169":1,"182":5,"183":3}}],["april",{"2":{"16":1}}],["apostrophes",{"2":{"182":1}}],["apos",{"2":{"7":12}}],["appends",{"2":{"183":3}}],["append",{"2":{"181":1}}]
,["appended",{"2":{"55":1,"182":1,"183":2}}],["approximates",{"2":{"181":2}}],["appropriate",{"2":{"136":1,"138":3,"150":1,"183":1}}],["approach>",{"2":{"174":4}}],["approach",{"2":{"55":1,"106":1,"124":1,"166":4,"168":2,"169":1,"174":4,"181":1,"182":3}}],["appreciate",{"2":{"13":1}}],["applying",{"2":{"181":1,"182":1}}],["apply",{"2":{"64":1,"67":1,"144":1,"182":2,"183":4}}],["apples",{"2":{"183":2}}],["apple",{"2":{"31":2,"37":1,"108":8,"183":1}}],["applicable",{"2":{"64":1,"154":1,"182":2}}],["applications",{"2":{"11":1,"56":3,"57":1,"58":1,"64":1,"110":1,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"127":1,"128":1,"163":1,"167":1,"175":1,"182":1}}],["application",{"2":{"2":1,"58":1,"100":1,"101":1}}],["applied",{"2":{"11":1,"52":1,"55":1,"57":1,"67":1,"106":1,"181":1,"182":1,"183":15}}],["applies",{"2":{"2":1,"63":1,"67":1,"181":2,"183":1}}],["app",{"2":{"4":1,"89":1,"167":1,"175":1}}],["apikey",{"0":{"78":1}}],["apitools",{"0":{"48":1,"179":1},"1":{"49":1,"50":1},"2":{"48":2,"50":1,"179":4,"180":1,"183":2}}],["apis",{"0":{"24":1,"25":1,"27":1},"1":{"26":1,"27":1,"28":1,"29":1,"30":1,"31":1},"2":{"24":1,"36":1,"48":1,"74":1,"75":1,"82":1,"100":1,"103":1,"180":1}}],["api",{"0":{"24":1,"77":1,"78":2,"84":1,"85":1,"86":2,"91":1,"97":1,"101":1},"2":{"0":7,"2":1,"7":1,"8":1,"11":2,"21":1,"22":3,"24":10,"25":1,"26":4,"27":8,"28":1,"29":5,"30":2,"31":2,"32":4,"37":1,"42":1,"49":2,"50":5,"54":3,"55":8,"56":1,"62":5,"64":34,"69":9,"76":3,"77":3,"78":5,"80":2,"81":1,"83":4,"84":10,"85":4,"86":1,"89":1,"91":3,"97":2,"98":3,"100":2,"101":3,"102":2,"106":4,"107":7,"108":7,"179":11,"180":1,"181":10,"182":69,"183":354}}],["abbreviations",{"2":{"126":1}}],["ab",{"2":{"67":1,"183":2}}],["abcabc",{"2":{"67":1,"183":1}}],["abc",{"2":{"55":3,"67":7,"106":2,"181":3,"182":7,"183":9}}],["ability",{"2":{"31":1,"117":1,"118":1}}],["abilities",{"2":{"24":1,"26":1}}],["abs",{"2":{"55":1,"181":1}}],["absence",{"2":{"13":1}}],["abstractextracteddata",{"2":{"183":1}}],["abstractembedder",{"2":{"61":2,"64":2,"182":10}}],["abstractdict",{"2":{"183":4}}],["abstractdatamessage",{"2":{"183":1}}],["abstractdocumenttermmatrix",{"2":{"182":1}}],["abstractdocumentindex",{"2":{"64":4,"182":17}}],["abstractfloat",{"2":{"182":4}}],["abstractprocessor=keywordsprocessor",{"2":{"182":1}}],["abstractprocessor",{"2":{"64":2,"182":7}}],["abstractpromptschema",{"2":{"0":1,"55":1,"100":1,"101":1,"102":2,"181":1,"183":6}}],["abstractpostprocessor",{"2":{"61":1,"64":2,"182":4}}],["abstractstreamflavor",{"2":{"183":1}}],["abstractstreamcallback",{"2":{"183":4}}],["abstractstring=",{"2":{"55":4,"67":2,"182":1,"183":8}}],["abstractstring",{"2":{"11":2,"50":8,"55":6,"61":3,"64":13,"67":15,"106":2,"179":10,"181":4,"182":89,"183":118}}],["abstractsharegptschema",{"2":{"183":1}}],["abstractscoringmethod",{"2":{"181":4}}],["abstractsimilarityfinder",{"2":{"61":1,"64":1,"182":11}}],["abstractgenerationmethod",{"2":{"182":1}}],["abstractgenerator",{"2":{"61":3,"64":4,"182":8,"183":1}}],["abstractgoogleschema",{"2":{"0":1,"183":2}}],["abstracttracer",{"2":{"183":1}}],["abstracttracermessage",{"2":{"183":2}}],["abstracttracerschema",{"2":{"183":14}}],["abstracttrees",{"2":{"55":1,"181":1,"182":1}}],["abstracttool",{"2":{"183":11}}],["abstracttagfilter",{"2":{"61":1,"64":1,"182":8}}],["abstracttagger",{"2":{"61":2,"64":4,"182":12}}],["abstractragconfig",{"2":{"61":3,"64":2,"182":3}}],["abstractragresult",{"2":{"60":2,"61":4,"63":1,"64":4,"182":14}}],["abstractretrievalmethod",{"2":{"182":1}}],["abstractretriever",{"2":{"60"
:2,"61":2,"64":4,"182":9,"183":1}}],["abstractrefiner",{"2":{"61":1,"64":4,"182":8}}],["abstractrephraser",{"2":{"61":1,"64":2,"182":9}}],["abstractreranker",{"2":{"60":1,"61":2,"64":2,"182":10}}],["abstractindexbuilder",{"2":{"61":2,"64":2,"182":7,"183":1}}],["abstractindex",{"2":{"60":1}}],["abstractcodeblock",{"2":{"183":1}}],["abstractcodeoutcome",{"2":{"55":2,"181":2}}],["abstractcontextbuilder",{"2":{"61":1,"64":2,"182":4}}],["abstractchar",{"2":{"67":1,"183":1}}],["abstractchatmessage",{"2":{"9":2,"94":1,"107":1,"183":3}}],["abstractchunker",{"2":{"61":1,"64":2,"182":7}}],["abstractchunkindex",{"2":{"61":4,"64":1,"182":17,"183":1}}],["abstractcandidatechunks",{"2":{"60":1,"182":6,"183":1}}],["abstractmatrix",{"2":{"182":13}}],["abstractmanagedschema",{"2":{"0":1,"183":2}}],["abstractmultiindex",{"2":{"182":2,"183":1}}],["abstractmessage",{"2":{"55":2,"58":1,"92":1,"93":1,"97":2,"103":1,"104":1,"181":6,"182":1,"183":110}}],["abstractvector",{"2":{"50":2,"55":1,"64":6,"67":1,"179":2,"181":4,"182":54,"183":50}}],["abstractannotationstyler",{"2":{"182":4}}],["abstractannotatednode",{"2":{"182":5}}],["abstractanswerer",{"2":{"61":1,"64":2,"182":4}}],["abstractanthropicschema",{"2":{"0":1,"183":8}}],["abstractarray",{"2":{"11":1,"106":1}}],["abstractoutcomes",{"2":{"181":1}}],["abstractollamamanagedschema",{"2":{"0":1,"183":6}}],["abstractollamaschema",{"2":{"0":1,"183":3}}],["abstractopenaischema",{"2":{"0":9,"91":2,"101":1,"102":1,"183":13}}],["abstract",{"2":{"0":1,"63":1,"64":2,"93":1,"108":2,"168":1,"169":1,"182":8,"183":1}}],["above",{"2":{"11":1,"14":2,"16":1,"18":1,"28":1,"57":1,"64":1,"66":1,"82":1,"106":2,"107":1,"112":1,"121":1,"130":1,"131":1,"132":1,"152":1,"160":1,"168":1,"169":1,"171":1,"181":1,"182":2,"183":5}}],["about",{"2":{"9":1,"14":2,"37":1,"52":1,"64":1,"93":1,"97":2,"98":2,"104":1,"107":1,"140":1,"161":1,"162":1,"163":1,"164":2,"165":1,"167":1,"168":1,"169":1,"170":1,"173":1,"175":1,"182":4,"183":19}}],["able",{"2":{"2":1,"181":1}}],["architectures",{"2":{"183":2}}],["arches",{"2":{"63":1}}],["arxiv",{"2":{"182":1}}],["arr",{"2":{"94":2,"182":3,"183":2}}],["arrays",{"2":{"168":1,"169":1,"183":1}}],["array",{"2":{"9":1,"14":1,"23":1,"45":1,"58":3,"67":3,"94":1,"182":5,"183":8}}],["arbitrary",{"2":{"93":1,"183":3}}],["art",{"2":{"74":1}}],["artificial",{"2":{"17":1}}],["articles",{"2":{"58":1,"183":2}}],["article",{"2":{"1":1}}],["arg2",{"2":{"183":1}}],["arg1",{"2":{"183":1}}],["arg",{"2":{"183":8}}],["argmin",{"2":{"67":2,"183":2}}],["argmax",{"2":{"67":1,"183":1}}],["args",{"2":{"55":5,"181":11,"182":1,"183":7}}],["argumenterror",{"0":{"78":2}}],["arguments",{"0":{"62":1,"86":1},"2":{"11":4,"13":1,"20":1,"22":1,"50":1,"52":2,"55":10,"60":2,"62":2,"64":17,"67":5,"72":1,"106":4,"108":2,"144":1,"145":1,"147":1,"149":1,"179":1,"181":27,"182":39,"183":108}}],["argument",{"2":{"6":1,"7":8,"9":1,"11":2,"16":1,"22":2,"24":1,"26":1,"39":1,"45":1,"54":2,"55":4,"58":2,"60":2,"67":1,"79":1,"92":2,"94":1,"106":2,"144":1,"145":1,"147":1,"181":5,"182":1,"183":16}}],["around",{"2":{"11":1,"60":1,"64":1,"67":1,"72":1,"102":1,"106":1,"171":1,"181":1,"182":5,"183":5}}],["areas",{"2":{"130":1}}],["are",{"2":{"0":1,"5":1,"6":1,"7":8,"9":12,"11":1,"12":1,"13":1,"14":3,"18":1,"20":1,"22":1,"24":5,"26":2,"27":3,"36":2,"37":1,"41":1,"52":3,"54":1,"55":12,"57":3,"58":4,"60":4,"61":1,"63":2,"64":11,"66":4,"67":6,"74":1,"75":1,"80":1,"83":1,"89":1,"90":1,"91":1,"93":2,"94":1,"97":1,"98":1,"100":2,"102":2,"103":2,"104":2,"105":2,"106":2,"107":4,"108":2,"112":2,"118":3,"120":1,"125":1,"126":1,"
127":1,"128":1,"130":4,"136":1,"137":1,"138":3,"140":1,"142":2,"144":1,"145":1,"147":1,"150":1,"152":1,"153":2,"156":1,"157":1,"159":2,"160":1,"161":2,"162":1,"163":2,"164":2,"165":1,"166":1,"167":3,"168":1,"169":1,"170":2,"171":4,"173":1,"174":1,"175":3,"178":1,"181":14,"182":41,"183":81}}],["atop",{"2":{"183":1}}],["atomic",{"2":{"64":10,"182":29}}],["ate",{"2":{"31":1,"108":3,"183":1}}],["attribute",{"2":{"182":2}}],["attract",{"2":{"127":1}}],["attempted",{"2":{"55":1,"181":3}}],["attempts",{"2":{"52":1,"55":2,"93":1,"181":3}}],["attempt",{"2":{"22":1,"54":1,"55":2,"181":2}}],["attach",{"2":{"183":5}}],["attached",{"2":{"13":1,"35":1,"41":1,"183":2}}],["attachments",{"2":{"13":1}}],["attachment",{"2":{"13":1,"35":1,"183":3}}],["at",{"2":{"1":1,"6":1,"7":1,"22":2,"24":1,"27":1,"31":1,"32":1,"34":1,"36":1,"43":1,"49":1,"51":1,"54":2,"55":5,"60":1,"63":1,"64":4,"74":1,"76":1,"79":1,"90":1,"100":1,"105":1,"107":1,"115":2,"121":2,"154":1,"181":12,"182":11,"183":23}}],["aspect",{"2":{"177":1}}],["aspects",{"2":{"140":1,"141":1,"142":1,"167":1,"175":1}}],["as=",{"2":{"94":2,"105":1,"183":1}}],["assesses",{"2":{"182":2}}],["assess",{"2":{"140":1}}],["assertion",{"2":{"183":1}}],["assertions",{"2":{"22":1,"55":1,"181":1}}],["assert",{"2":{"55":1,"108":2,"181":1,"182":2}}],["assigning",{"2":{"171":1}}],["assign",{"2":{"114":1,"121":1}}],["assistance",{"2":{"24":1,"26":1,"42":1}}],["assistant",{"2":{"13":1,"34":1,"92":1,"97":1,"98":1,"105":1,"107":2,"110":1,"112":1,"117":1,"118":1,"121":2,"122":1,"124":1,"126":1,"128":1,"140":3,"141":7,"142":2,"150":1,"157":2,"158":1,"159":1,"183":22}}],["assistantask",{"0":{"157":1},"2":{"11":1,"105":4,"106":1,"107":3}}],["assist",{"2":{"23":1,"24":1,"26":1,"30":2,"31":1,"34":1,"92":1,"183":10}}],["associated",{"2":{"90":1,"107":1,"153":1,"182":1,"183":5}}],["assuming",{"2":{"64":1,"80":1,"82":1,"182":3,"183":1}}],["assumed",{"2":{"181":1,"182":4,"183":1}}],["assumes",{"2":{"37":1,"64":1,"182":5,"183":5}}],["assume",{"2":{"23":1,"70":1,"79":1,"181":1,"182":2}}],["asterisk",{"2":{"12":1}}],["asynchronous",{"0":{"15":1},"2":{"72":1,"183":5}}],["asyncmap",{"2":{"7":1,"15":3,"46":2,"64":1,"72":1,"79":3,"182":4}}],["async",{"2":{"7":1,"46":1,"183":1}}],["asks",{"2":{"132":1,"160":2}}],["ask=",{"2":{"55":1,"106":2,"107":2,"181":1}}],["asked",{"0":{"73":1},"1":{"74":1,"75":1,"76":1,"77":1,"78":1,"79":1,"80":1,"81":1,"82":1,"83":1,"84":1,"85":1,"86":1,"87":1,"88":1,"89":1,"90":1,"91":1,"92":1,"93":1,"94":1,"95":1,"96":1,"97":1,"98":1},"2":{"37":1,"182":1}}],["asking",{"2":{"9":2,"14":1,"157":1,"161":1,"163":1,"165":1,"170":1,"173":1,"183":2}}],["ask",{"2":{"2":1,"9":8,"14":4,"22":2,"24":1,"26":1,"31":1,"34":1,"42":1,"54":2,"55":2,"57":1,"66":1,"105":3,"107":7,"131":1,"157":3,"161":3,"163":3,"165":3,"170":3,"173":3,"181":2,"182":2,"183":6}}],["as",{"2":{"0":3,"2":1,"6":1,"7":4,"9":5,"11":9,"12":1,"16":1,"17":1,"18":1,"19":1,"21":1,"22":2,"24":5,"26":2,"27":2,"29":3,"30":2,"31":4,"32":1,"34":1,"39":1,"42":2,"43":2,"47":1,"48":1,"49":1,"51":1,"52":2,"54":3,"55":13,"56":1,"58":2,"60":3,"63":1,"64":1,"67":15,"69":1,"72":2,"79":1,"83":2,"84":3,"92":4,"93":4,"94":5,"95":1,"97":3,"98":1,"101":1,"104":1,"105":2,"106":7,"107":2,"108":12,"110":1,"114":1,"117":2,"118":1,"121":1,"124":2,"125":2,"130":3,"140":6,"141":1,"142":2,"144":3,"145":3,"147":3,"152":6,"153":4,"154":3,"158":2,"160":4,"162":2,"164":1,"167":3,"171":1,"175":3,"178":2,"181":28,"182":34,"183":133}}],["a",{"0":{"1":1,"4":1,"5":1,"6":1,"94":1,"95":1,"96":1},"1":{"2":1},"2":{"0":3,"1":2,"2":6,"3":3,"4":1,"5":2,"6":9,"7"
:16,"8":2,"9":23,"11":14,"12":6,"13":7,"14":7,"16":3,"17":1,"18":4,"19":3,"20":6,"21":5,"22":6,"23":1,"24":3,"25":1,"26":3,"28":2,"29":1,"30":3,"31":4,"32":1,"34":2,"35":2,"36":2,"37":1,"39":1,"40":1,"41":5,"42":3,"43":1,"45":1,"47":1,"50":2,"51":1,"52":12,"54":3,"55":47,"56":2,"57":6,"58":8,"60":6,"63":6,"64":42,"65":2,"66":12,"67":35,"69":1,"71":2,"78":2,"79":5,"80":1,"81":4,"82":8,"83":1,"84":5,"89":3,"91":2,"92":5,"93":14,"94":11,"96":7,"97":3,"98":4,"100":1,"101":2,"102":1,"103":4,"104":1,"105":8,"106":14,"107":6,"108":24,"110":1,"114":2,"117":1,"118":1,"120":4,"121":4,"122":4,"124":3,"125":3,"126":4,"127":1,"128":4,"130":4,"131":1,"132":1,"136":2,"138":2,"140":7,"141":7,"142":5,"144":2,"145":2,"147":2,"150":5,"152":9,"153":6,"154":3,"157":1,"158":7,"159":2,"160":5,"161":1,"162":3,"163":1,"164":5,"165":1,"166":1,"167":8,"168":2,"169":3,"170":1,"171":6,"173":1,"174":2,"175":8,"177":4,"178":1,"179":2,"181":85,"182":216,"183":625}}],["al",{"2":{"182":3}}],["algorithm",{"2":{"182":5}}],["algorithms",{"2":{"55":1,"58":1,"181":2}}],["alignment",{"2":{"144":1,"145":1,"147":1}}],["aligns",{"2":{"140":1,"182":2,"183":2}}],["aligned",{"2":{"125":1,"127":1,"182":1}}],["align",{"2":{"121":1,"140":1,"142":1,"182":4,"183":7}}],["aliased",{"2":{"31":1}}],["aliases",{"0":{"16":1},"2":{"16":5,"29":1,"37":1,"42":1,"83":1,"91":2,"183":36}}],["alias",{"2":{"7":1,"29":1,"30":1,"31":1,"33":1,"42":1,"108":2,"182":1,"183":36}}],["alexander",{"2":{"120":3}}],["almost",{"2":{"60":1}}],["alternative",{"2":{"126":1,"183":7}}],["alternatives",{"0":{"88":1},"2":{"75":1}}],["alternatively",{"2":{"9":1,"55":1,"69":1,"89":1,"181":2,"183":3}}],["alter",{"2":{"55":1,"183":1}}],["already",{"2":{"30":1,"31":1,"37":1,"64":1,"70":1,"79":1,"89":1,"181":1,"182":4}}],["always",{"2":{"9":1,"24":1,"26":1,"28":1,"55":2,"60":1,"66":1,"67":2,"76":1,"77":1,"81":1,"93":1,"94":2,"130":2,"138":1,"140":1,"141":1,"142":1,"169":1,"181":5,"182":4,"183":6}}],["also",{"2":{"2":1,"6":1,"7":2,"9":4,"11":1,"13":1,"16":1,"19":2,"22":1,"23":1,"24":1,"28":1,"29":2,"30":1,"31":1,"50":1,"54":1,"55":8,"64":7,"67":2,"85":1,"91":1,"93":1,"94":1,"98":1,"106":1,"108":2,"130":1,"131":1,"132":1,"179":1,"181":7,"182":19,"183":42}}],["all=false",{"2":{"183":7}}],["all=true`",{"2":{"106":1}}],["all=true",{"2":{"2":1,"58":1,"64":2,"92":4,"93":1,"98":1,"106":1,"108":1,"182":2,"183":22}}],["alltagfilter",{"2":{"182":4,"183":1}}],["allocated",{"2":{"182":1}}],["allocations",{"2":{"182":1}}],["allocation",{"2":{"58":1}}],["allowing",{"2":{"56":1,"181":1,"183":2}}],["allow",{"2":{"52":1,"88":1,"108":1,"182":1,"183":4}}],["allowed",{"2":{"22":1,"55":1,"108":1,"181":2,"183":34}}],["allows",{"2":{"11":1,"22":1,"23":1,"25":1,"42":1,"52":3,"54":1,"55":2,"64":2,"81":1,"106":1,"181":4,"182":2,"183":9}}],["all",{"2":{"0":2,"2":1,"6":3,"7":9,"9":1,"11":3,"12":2,"13":1,"14":1,"16":1,"20":1,"24":1,"26":1,"35":1,"52":2,"55":16,"58":1,"60":1,"62":2,"63":1,"64":12,"67":1,"70":1,"92":1,"93":1,"94":2,"97":4,"99":1,"101":1,"102":1,"105":1,"106":3,"108":1,"114":1,"121":3,"130":2,"131":1,"153":2,"167":1,"168":1,"169":1,"171":2,"175":1,"178":1,"181":30,"182":26,"183":82}}],["along",{"2":{"0":1,"64":1,"140":1,"181":1,"182":2}}],["anonymous",{"2":{"168":1,"169":1}}],["another",{"2":{"12":1,"55":1,"64":3,"181":2,"182":5,"183":1}}],["ancient",{"2":{"67":1,"183":1}}],["ancestors",{"2":{"55":1,"181":5}}],["ancestor",{"2":{"55":1,"181":1}}],["annotation",{"2":{"182":3}}],["annotations",{"2":{"64":1,"168":1,"169":1,"182":3}}],["annotating",{"2":{"182":1}}],["annotatednode",{"2":{"182":11,"183
":1}}],["annotated",{"2":{"64":6,"182":6}}],["annotates",{"2":{"64":1,"182":1}}],["annotater",{"2":{"64":6,"182":10}}],["annotate",{"2":{"11":1,"57":1,"58":1,"64":8,"182":16,"183":3}}],["animal",{"2":{"19":2,"93":2,"183":8}}],["ans",{"2":{"11":5,"71":1,"106":5}}],["answer=",{"2":{"182":4}}],["answer=answer",{"2":{"182":1}}],["answering",{"2":{"121":1,"144":1}}],["answered",{"2":{"64":1,"120":1,"153":1,"182":1}}],["answerer",{"2":{"62":1,"64":11,"182":17}}],["answers",{"2":{"3":1,"8":1,"56":1,"57":1,"110":1,"117":1,"118":1,"120":1,"124":2,"125":1,"140":1,"141":3,"142":1,"152":1,"161":2,"163":2,"170":2,"182":5}}],["answer",{"2":{"2":3,"5":1,"6":4,"7":4,"9":6,"11":4,"12":2,"18":1,"22":3,"24":1,"26":1,"31":1,"49":1,"50":4,"54":3,"55":16,"57":3,"58":7,"60":1,"61":2,"63":1,"64":33,"93":1,"105":2,"106":1,"107":4,"110":4,"117":20,"118":22,"120":6,"121":14,"122":7,"124":1,"125":1,"126":1,"137":1,"141":2,"142":3,"157":2,"161":2,"163":2,"165":2,"170":2,"173":2,"179":4,"181":13,"182":103,"183":9}}],["antropic",{"2":{"183":1}}],["antibiotics",{"2":{"120":2}}],["anti",{"2":{"7":1}}],["anthropicstream",{"2":{"183":3}}],["anthropicschema",{"2":{"0":1,"183":4}}],["anthropic",{"2":{"0":1,"74":1,"75":1,"144":1,"145":1,"173":1,"174":1,"175":1,"183":28}}],["analystthemesinresponses",{"0":{"154":1}}],["analystdecisionsintranscript",{"0":{"153":1}}],["analyst",{"2":{"152":1,"153":1}}],["analystchaptersintranscript",{"0":{"152":1}}],["analysis",{"2":{"6":1,"7":1,"9":4,"18":1,"55":1,"58":2,"154":1,"181":1}}],["analyzed",{"2":{"154":1}}],["analyze",{"2":{"7":1,"120":1,"127":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"153":1}}],["anytagfilter",{"2":{"182":4,"183":1}}],["anything",{"2":{"31":1,"34":1,"39":1,"40":1,"42":1,"152":1,"182":1,"183":3}}],["anymore",{"2":{"90":1}}],["anyone",{"2":{"69":1,"77":1}}],["anywhere",{"0":{"87":1},"2":{"23":1,"87":1,"89":1}}],["anyscale",{"2":{"8":1}}],["any",{"2":{"0":2,"2":1,"4":1,"6":2,"7":2,"9":2,"11":2,"12":3,"14":1,"18":1,"19":4,"22":1,"24":2,"25":1,"26":1,"28":1,"31":2,"32":1,"34":1,"35":1,"42":1,"52":3,"54":1,"55":13,"56":1,"60":2,"63":1,"64":1,"67":1,"69":1,"71":1,"77":1,"78":1,"82":1,"83":1,"84":1,"87":1,"93":3,"94":3,"96":1,"97":2,"98":2,"102":1,"106":2,"107":1,"108":9,"112":1,"114":2,"115":1,"118":1,"120":1,"126":2,"130":1,"140":1,"141":1,"142":3,"152":2,"153":2,"154":1,"156":1,"166":1,"167":2,"168":1,"169":2,"171":1,"174":1,"175":2,"177":2,"181":10,"182":23,"183":110}}],["an",{"0":{"78":2,"79":1},"2":{"0":2,"2":3,"6":1,"7":3,"11":4,"13":1,"18":1,"20":1,"21":1,"22":1,"23":1,"32":1,"41":1,"42":2,"48":1,"49":2,"50":3,"51":1,"52":3,"54":1,"55":13,"56":1,"57":2,"58":4,"60":2,"61":1,"63":2,"64":11,"66":1,"67":5,"69":3,"77":2,"78":1,"82":1,"83":1,"84":1,"93":3,"96":2,"97":1,"99":2,"100":1,"101":2,"102":1,"105":1,"106":4,"108":8,"110":1,"112":1,"117":2,"118":2,"121":5,"122":1,"124":1,"126":1,"128":1,"130":1,"131":2,"137":1,"138":1,"140":1,"142":1,"150":1,"154":1,"164":1,"168":1,"171":2,"177":1,"179":3,"181":22,"182":34,"183":144}}],["and",{"0":{"21":1,"24":1,"76":1},"2":{"0":6,"1":5,"2":12,"3":2,"4":2,"6":3,"7":17,"8":5,"9":15,"11":13,"12":3,"13":3,"14":2,"17":2,"18":3,"19":1,"20":4,"22":9,"23":2,"24":6,"26":2,"27":3,"28":2,"29":2,"30":3,"31":3,"32":2,"34":1,"35":2,"36":2,"37":4,"41":2,"42":5,"45":1,"47":1,"48":1,"49":2,"51":1,"52":11,"54":9,"55":58,"56":1,"57":1,"58":11,"60":7,"62":3,"63":5,"64":47,"65":2,"66":6,"67":10,"69":4,"70":1,"71":2,"72":2,"74":1,"75":1,"76":1,"77":2,"78":2,"79":3,"80":2,"81":2,"82":2,"83":2,"84":1,"85":2,"87":1,"88":1,"89":3,"90":2,"91":3,"92":1
,"93":13,"94":6,"95":3,"96":1,"97":5,"98":4,"99":2,"100":2,"101":1,"102":2,"104":2,"105":5,"106":14,"107":10,"108":27,"110":1,"114":6,"115":1,"117":3,"118":4,"120":5,"121":3,"122":3,"124":2,"125":1,"126":3,"127":1,"128":1,"130":10,"131":4,"132":1,"136":2,"138":2,"140":16,"141":5,"142":10,"144":5,"145":4,"147":4,"149":1,"150":4,"152":17,"153":16,"154":3,"156":1,"157":2,"158":5,"159":2,"160":9,"161":3,"162":2,"163":2,"164":7,"165":2,"166":4,"167":5,"168":2,"169":3,"170":4,"171":10,"173":2,"174":3,"175":5,"177":1,"178":2,"179":1,"180":2,"181":98,"182":190,"183":266}}],["aiprefill",{"2":{"183":5}}],["aims",{"2":{"182":4}}],["aimessage>",{"2":{"12":1,"183":1}}],["aimessage",{"2":{"2":1,"9":1,"11":3,"13":2,"21":2,"23":1,"24":1,"26":1,"30":1,"31":1,"34":1,"35":1,"36":1,"39":1,"40":1,"41":1,"42":1,"52":1,"55":6,"58":2,"60":1,"61":2,"71":2,"72":1,"92":2,"94":2,"100":2,"104":1,"106":3,"107":2,"181":11,"183":73}}],["aiagent",{"2":{"181":1}}],["aihelpme",{"2":{"94":1}}],["air",{"2":{"67":1,"183":1}}],["airetry",{"0":{"22":1},"2":{"11":1,"22":8,"52":3,"54":8,"55":14,"93":6,"106":1,"108":6,"181":15,"183":1}}],["airag",{"2":{"2":2,"6":1,"7":2,"11":1,"57":1,"58":3,"61":1,"62":1,"63":1,"64":7,"182":25,"183":1}}],["aiclassifier",{"2":{"93":1}}],["aiclassify",{"2":{"0":1,"11":2,"14":1,"18":1,"19":2,"55":1,"93":4,"106":1,"136":1,"181":7,"183":21}}],["aicodefixer",{"2":{"52":2,"55":9,"130":1,"131":1,"132":1,"180":1,"181":31,"183":2}}],["aicode",{"2":{"52":3,"55":16,"181":6,"183":13}}],["aicallblock",{"2":{"55":4,"181":13}}],["aicall",{"2":{"11":2,"22":9,"52":3,"54":9,"55":53,"106":2,"108":4,"181":102,"183":2}}],["aitoolrequest",{"2":{"183":4}}],["aitools",{"2":{"0":1,"106":1,"183":16}}],["aitemplatemetadata",{"2":{"9":2,"14":3,"94":2,"183":14}}],["aitemplate",{"2":{"9":4,"14":3,"64":1,"97":2,"105":2,"107":2,"181":4,"182":1,"183":19}}],["aitemplates",{"0":{"9":1},"2":{"9":3,"11":4,"14":4,"94":1,"105":2,"106":3,"182":2,"183":23}}],["aiimage",{"2":{"0":2,"11":2,"106":1,"183":8}}],["aiscan",{"0":{"43":1},"2":{"0":2,"11":2,"21":3,"43":1,"106":1,"181":6,"183":19}}],["aiextract",{"0":{"108":1},"2":{"0":2,"6":1,"11":2,"20":2,"31":2,"55":1,"64":3,"104":1,"106":2,"108":5,"144":1,"145":1,"147":1,"181":7,"182":6,"183":44}}],["aiembed",{"0":{"44":1},"1":{"45":1,"46":1,"47":1},"2":{"0":1,"11":2,"17":3,"23":3,"24":1,"27":1,"29":1,"30":1,"31":1,"45":2,"46":2,"47":1,"55":1,"64":2,"104":1,"106":2,"181":7,"182":3,"183":26}}],["aigenerate",{"0":{"33":1,"38":1,"72":1,"86":1,"107":1},"1":{"34":1,"35":1,"36":1,"39":1,"40":1,"41":1,"42":1},"2":{"0":1,"9":1,"11":9,"13":4,"14":3,"15":2,"22":5,"23":2,"24":4,"26":2,"27":2,"28":1,"29":2,"30":1,"31":1,"32":1,"34":1,"35":1,"37":1,"39":1,"40":2,"41":1,"42":2,"52":10,"54":5,"55":18,"64":6,"72":1,"78":1,"79":1,"83":3,"92":3,"93":6,"97":2,"98":3,"99":1,"100":1,"106":11,"107":2,"108":11,"180":1,"181":38,"182":16,"183":67}}],["ai",{"0":{"11":1,"30":1,"31":1,"32":1,"37":1,"54":1,"71":1,"106":1},"1":{"33":1,"34":1,"35":1,"36":1,"38":1,"39":1,"40":1,"41":1,"42":1,"43":1,"44":1,"45":1,"46":1,"47":1},"2":{"0":5,"1":1,"9":1,"11":12,"12":1,"13":1,"15":1,"16":3,"17":1,"18":1,"22":4,"23":1,"24":2,"27":2,"29":1,"30":2,"31":3,"32":2,"34":1,"36":1,"37":2,"52":16,"54":1,"55":20,"56":1,"57":1,"58":1,"64":6,"65":1,"66":2,"67":1,"71":5,"72":3,"74":2,"78":1,"87":1,"88":1,"89":1,"92":6,"94":2,"96":1,"97":3,"100":2,"102":1,"103":1,"104":3,"105":3,"106":13,"107":3,"108":8,"110":1,"117":2,"118":4,"120":1,"121":1,"124":1,"137":1,"140":4,"141":5,"142":2,"150":1,"152":1,"153":1,"157":1,"158":1,"159":1,"171":3,"181":51,"1
82":14,"183":115}}],["cc",{"2":{"182":1}}],["cn",{"2":{"67":1,"183":1}}],["cfg",{"2":{"61":1,"62":1,"64":9,"182":19}}],["cb",{"2":{"55":15,"181":3,"183":36}}],["cpp",{"0":{"28":1},"2":{"25":1,"28":3,"75":1,"100":1,"101":1,"183":1}}],["c",{"2":{"20":1,"28":2,"67":1,"79":2,"81":2,"181":1,"182":2,"183":1}}],["city",{"2":{"20":3}}],["cerebras",{"2":{"183":6}}],["cerebrasopenaischema",{"2":{"183":2}}],["certainly",{"2":{"67":1,"183":1}}],["certain",{"2":{"0":1,"16":1,"93":1,"183":3}}],["ceo",{"2":{"154":1}}],["cents",{"2":{"82":2}}],["cent",{"2":{"80":1,"82":1}}],["celestial",{"2":{"67":2,"183":2}}],["celsius",{"2":{"20":2}}],["curiosity",{"2":{"183":1}}],["curr",{"2":{"182":3}}],["currently",{"2":{"24":1,"26":1,"49":1,"55":1,"61":1,"89":1,"181":2,"183":11}}],["currentweather",{"2":{"20":2}}],["current",{"2":{"20":2,"37":1,"55":1,"70":1,"80":1,"93":1,"98":1,"181":3,"182":1,"183":10}}],["cumulative",{"2":{"182":2}}],["customizing",{"2":{"181":1}}],["customized",{"2":{"91":1,"152":1}}],["customize",{"2":{"55":1,"58":1,"60":4,"64":6,"90":1,"91":1,"181":1,"182":8}}],["customer",{"0":{"83":1},"2":{"83":1}}],["custom",{"0":{"25":1,"42":1,"91":1},"1":{"26":1,"27":1,"28":1,"29":1,"30":1,"31":1},"2":{"55":1,"56":1,"60":5,"62":5,"64":13,"67":2,"91":3,"93":2,"181":1,"182":14,"183":9}}],["customopenaischema",{"2":{"0":3,"24":2,"27":2,"28":1,"183":7}}],["cut",{"2":{"16":1}}],["crucial",{"2":{"183":1}}],["crunchy",{"2":{"31":2,"108":2}}],["craft",{"2":{"160":1,"171":1}}],["critiquing",{"2":{"140":1}}],["critique>",{"2":{"130":1}}],["critiques",{"2":{"130":1,"140":1,"141":1,"142":2}}],["critique",{"2":{"64":1,"130":12,"140":2,"141":2,"182":1}}],["critic",{"0":{"139":1},"1":{"140":1,"141":1,"142":1},"2":{"140":1,"141":4,"142":1}}],["criticism",{"2":{"130":1}}],["criterion",{"2":{"121":1}}],["criteria",{"2":{"121":2,"182":1}}],["credit",{"2":{"108":1,"183":3}}],["credits",{"2":{"69":2,"75":1,"80":1,"83":1}}],["creativity",{"2":{"183":1}}],["creative",{"2":{"11":1,"106":1,"183":2}}],["creation",{"2":{"120":1}}],["creating",{"0":{"77":1},"2":{"58":1,"64":1,"160":1,"182":4}}],["creature",{"2":{"19":2,"93":1,"183":5}}],["createqafromcontext",{"2":{"64":1,"182":1}}],["creates",{"2":{"55":1,"63":1,"94":1,"97":1,"181":5,"182":1,"183":3}}],["create",{"0":{"94":1},"2":{"2":2,"9":2,"11":1,"17":1,"49":1,"50":3,"52":1,"55":1,"60":1,"64":5,"69":2,"77":2,"91":3,"94":6,"96":1,"100":1,"105":3,"106":1,"107":1,"108":1,"158":1,"166":1,"174":1,"179":4,"181":3,"182":14,"183":27}}],["cross",{"2":{"7":1}}],["ctx",{"2":{"6":6,"7":2,"182":4}}],["click",{"2":{"69":1,"77":1}}],["clipboard",{"2":{"55":2,"183":2}}],["clearly",{"2":{"60":1,"153":1,"154":1}}],["clear",{"2":{"55":1,"120":1,"121":3,"122":1,"130":1,"150":1,"152":1,"153":2,"154":1,"160":2,"161":1,"163":1,"164":1,"170":1,"181":1}}],["cleaning",{"2":{"159":1}}],["cleanup",{"2":{"9":1}}],["cleaner",{"2":{"9":1,"93":1}}],["cleaned",{"2":{"1":1}}],["clustering",{"2":{"17":1}}],["closely",{"2":{"67":2,"140":1,"178":1,"183":2}}],["close",{"2":{"67":2,"141":1,"160":1,"183":2}}],["closest",{"2":{"2":5,"61":1,"63":1,"64":4,"67":4,"182":45,"183":9}}],["cloudy",{"2":{"183":4}}],["cloud",{"2":{"13":1}}],["claudes",{"2":{"183":1}}],["claudeo",{"2":{"183":2}}],["claudeh",{"2":{"183":23}}],["claude",{"2":{"183":8}}],["clarification",{"2":{"141":1}}],["clarity",{"2":{"6":1,"121":1,"140":4,"141":1,"153":3,"168":1,"169":1}}],["classes=",{"2":{"182":3}}],["classes",{"2":{"182":5}}],["classifies",{"2":{"183":2}}],["classified",{"2":{"171":3,"183":1}}],["classification",{"0":{"18":1,"135":1},"1":{"19"
:1,"136":1,"137":1,"138":1},"2":{"19":1,"136":2,"137":1,"138":1,"183":4}}],["classify",{"2":{"11":1,"18":1,"106":1,"183":3}}],["class",{"2":{"9":4,"14":1,"104":1,"105":1,"107":2,"108":1,"110":1,"114":1,"117":1,"118":1,"120":1,"124":1,"125":1,"127":1,"136":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"150":1,"154":1,"157":1,"158":1,"159":1,"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"173":1,"174":1,"175":1,"178":1,"182":1,"183":2}}],["child2",{"2":{"181":2}}],["child11",{"2":{"181":2}}],["child1",{"2":{"181":3}}],["children",{"2":{"64":2,"181":2,"182":9}}],["chief",{"2":{"140":4}}],["chiefeditortranscriptcritic",{"0":{"140":1}}],["chuckles",{"2":{"41":2}}],["chunkdata",{"2":{"182":13,"183":3}}],["chunkkeywordsindex",{"2":{"64":4,"182":13,"183":2}}],["chunked",{"2":{"64":1,"182":8}}],["chunkembeddingsindex",{"2":{"64":3,"182":7,"183":1}}],["chunkers",{"2":{"182":1}}],["chunker=filechunker",{"2":{"64":1,"182":1}}],["chunker",{"2":{"58":3,"60":2,"64":14,"182":29}}],["chunking",{"2":{"60":1,"63":1,"64":1,"182":2}}],["chunkindex",{"2":{"57":1,"182":3}}],["chunk",{"2":{"2":3,"8":1,"11":1,"58":2,"63":3,"64":6,"67":8,"95":1,"114":1,"120":4,"182":25,"183":31}}],["chunks`",{"2":{"183":3}}],["chunks",{"2":{"2":13,"3":1,"4":2,"8":1,"57":2,"58":2,"60":3,"61":3,"63":5,"64":35,"67":13,"95":1,"112":1,"182":146,"183":31}}],["cheaper",{"2":{"22":1,"54":1,"55":1,"181":1,"183":2}}],["cheap",{"2":{"19":1,"80":1,"183":1}}],["checkout",{"2":{"67":1,"183":1}}],["check",{"2":{"22":1,"23":1,"54":1,"55":7,"64":1,"69":1,"76":1,"78":1,"80":1,"84":1,"85":1,"89":1,"91":1,"93":3,"96":1,"108":2,"142":1,"166":1,"168":1,"169":1,"174":1,"181":5,"182":4,"183":14}}],["checks",{"2":{"18":1,"54":1,"55":4,"82":1,"181":6,"183":2}}],["checking",{"2":{"18":1}}],["choice=",{"2":{"183":1}}],["choice",{"2":{"108":1,"138":3,"181":1,"183":19}}],["choices",{"2":{"11":3,"19":2,"93":2,"106":3,"107":1,"136":6,"138":6,"183":42}}],["chosen",{"2":{"22":1,"54":1,"55":1,"181":1}}],["choose",{"2":{"0":1,"154":1,"182":2,"183":1}}],["chapter",{"2":{"152":2}}],["chapters",{"2":{"152":4,"153":1}}],["chain",{"2":{"144":1,"166":1,"168":1,"174":1,"183":2}}],["chars",{"2":{"67":1,"183":1}}],["character",{"2":{"67":3,"178":1,"182":1,"183":3}}],["characters",{"2":{"36":1,"67":4,"130":1,"181":4,"182":13,"183":5}}],["charles",{"2":{"50":2,"179":2}}],["charge",{"2":{"36":2,"80":2,"183":1}}],["chance",{"2":{"182":1}}],["chances",{"2":{"22":1,"54":1,"55":1,"181":1}}],["channel",{"2":{"57":1,"66":1,"183":3}}],["changing",{"0":{"90":1},"2":{"55":1,"58":1,"60":1,"64":1,"182":3,"183":1}}],["changed",{"2":{"130":1}}],["changes",{"0":{"42":1},"2":{"37":1,"94":1,"118":1,"142":1,"181":1,"182":4,"183":3}}],["change",{"2":{"1":1,"9":1,"22":1,"42":1,"54":1,"55":1,"64":3,"91":1,"108":1,"130":1,"180":1,"181":3,"182":8,"183":9}}],["challenging",{"2":{"0":1}}],["chat1",{"2":{"183":1}}],["chatmlschema",{"2":{"183":5}}],["chatgpt",{"2":{"12":1,"80":1,"182":3}}],["chatbots",{"2":{"183":1}}],["chatbot",{"2":{"1":1,"2":1}}],["chat",{"2":{"0":1,"29":3,"39":1,"42":1,"64":8,"90":2,"91":2,"100":1,"107":2,"182":25,"183":34}}],["cababcab",{"2":{"183":1}}],["caching",{"2":{"183":11}}],["caches",{"2":{"183":6}}],["cache",{"2":{"78":1,"183":20}}],["caused",{"2":{"130":1}}],["causes",{"2":{"41":1}}],["caught",{"2":{"55":1,"181":1,"183":1}}],["capable",{"2":{"138":1,"183":1}}],["capabilities",{"2":{"55":1,"74":1,"183":1}}],["captioning",{"2":{"183":2}}],["captain",{"2":{"94":2,"183":2}}],["capturing",{"2":{"55":3,"178":1,"183":3}}],["captures",{"
2":{"98":1,"177":1,"183":1}}],["captured",{"2":{"55":3,"183":3}}],["capture",{"2":{"55":5,"98":2,"108":1,"181":1,"183":11}}],["capital",{"2":{"16":1,"64":2,"71":5,"72":2,"107":4,"182":5}}],["cartoonish",{"2":{"177":2}}],["cartesian",{"2":{"7":1}}],["carries",{"2":{"168":1,"169":1}}],["carrying",{"2":{"67":1,"183":1}}],["carefully",{"2":{"114":1,"122":1,"142":1,"154":1}}],["care",{"2":{"102":1}}],["car",{"2":{"93":2,"108":1,"183":7}}],["carve",{"2":{"56":1}}],["carlo",{"2":{"22":1,"52":1,"55":1,"181":3}}],["ca",{"2":{"20":1}}],["casual",{"2":{"160":1}}],["castle",{"2":{"19":1,"183":1}}],["cased",{"2":{"158":1}}],["cases",{"2":{"55":1,"167":1,"175":1,"181":1,"183":7}}],["case",{"2":{"9":1,"58":1,"74":1,"93":2,"96":1,"107":1,"114":1,"160":1,"182":2,"183":3}}],["cat",{"2":{"183":7}}],["categorize",{"2":{"154":1}}],["categories",{"0":{"19":1},"2":{"19":1,"93":2,"114":1,"115":1,"138":2,"183":2}}],["category",{"2":{"93":2,"114":1,"115":1,"138":4,"182":1,"183":1}}],["catch",{"2":{"0":1,"20":1,"22":2,"52":1,"54":2,"55":4,"108":1,"181":9,"183":1}}],["camelcase",{"2":{"22":1,"183":1}}],["came",{"2":{"11":1,"182":1}}],["calculating",{"2":{"183":1}}],["calculation",{"2":{"182":1,"183":1}}],["calculates",{"2":{"182":3}}],["calculated",{"2":{"79":1,"171":1,"183":2}}],["calculate",{"2":{"7":1,"17":2,"183":10}}],["calltracer",{"2":{"183":2}}],["callable",{"2":{"183":9}}],["callbacks",{"2":{"183":1}}],["callback",{"2":{"183":34}}],["call",{"2":{"9":1,"11":2,"12":1,"16":1,"22":2,"29":1,"52":3,"54":2,"55":15,"63":1,"64":10,"78":1,"82":1,"92":1,"94":2,"100":1,"105":1,"106":2,"108":2,"144":1,"145":1,"147":1,"181":44,"182":19,"183":85}}],["calling",{"2":{"9":1,"31":1,"37":1,"52":1,"55":1,"108":2,"144":1,"145":1,"147":1,"181":2,"182":1,"183":12}}],["called",{"2":{"7":3,"9":1,"11":1,"12":1,"22":1,"52":1,"55":1,"63":4,"71":1,"94":1,"100":1,"106":1,"181":4,"182":4,"183":3}}],["calls",{"0":{"54":1},"2":{"0":2,"11":1,"22":4,"46":1,"50":1,"52":2,"54":1,"55":6,"62":1,"64":10,"76":1,"93":1,"106":2,"108":2,"144":1,"145":1,"147":1,"179":1,"180":1,"181":13,"182":22,"183":41}}],["cannot",{"0":{"75":1,"78":2},"2":{"18":1,"64":1,"67":1,"74":1,"83":1,"108":2,"137":1,"182":4,"183":6}}],["candidatechunks",{"2":{"58":4,"63":1,"64":1,"182":13,"183":1}}],["candidate",{"2":{"2":1,"182":17}}],["candidates",{"2":{"2":1,"58":4,"60":1,"63":3,"64":2,"182":40}}],["can",{"0":{"97":1},"2":{"2":1,"6":2,"7":7,"8":1,"9":11,"11":4,"12":1,"13":4,"14":2,"15":2,"16":5,"17":2,"18":2,"19":2,"20":2,"21":4,"22":8,"23":3,"24":9,"26":7,"27":1,"28":2,"29":3,"30":5,"31":4,"32":1,"33":1,"35":2,"39":1,"40":1,"42":2,"43":1,"46":2,"50":2,"52":2,"54":7,"55":22,"58":4,"60":1,"62":2,"63":2,"64":10,"66":1,"67":4,"69":1,"70":1,"71":1,"72":2,"74":1,"76":2,"77":1,"78":3,"79":2,"81":1,"82":2,"83":2,"84":1,"85":1,"87":2,"89":2,"90":3,"91":3,"92":3,"93":6,"94":8,"96":2,"97":2,"98":8,"99":1,"100":2,"101":2,"103":2,"105":3,"106":4,"107":1,"108":10,"112":1,"114":1,"120":1,"130":1,"131":1,"132":1,"152":1,"167":1,"175":1,"179":2,"181":23,"182":27,"183":162}}],["cognitive",{"2":{"183":2}}],["copies",{"2":{"182":1}}],["copy",{"2":{"2":1,"23":1,"45":2,"55":1,"183":3}}],["coding",{"2":{"181":1}}],["codeunits",{"2":{"183":3}}],["code>",{"2":{"130":1,"175":6}}],["codefixer",{"2":{"181":4}}],["codefixertiny",{"0":{"132":1}}],["codefixershort",{"0":{"131":1}}],["codefixerrci",{"0":{"130":1},"2":{"181":1}}],["codefailedtimeout",{"2":{"55":1,"181":1}}],["codefailedeval",{"2":{"55":1,"181":1}}],["codefailedparse",{"2":{"55":1,"181":1}}],["codellama",{"2":{"89":1}}],["codes",{"2":{"80":1
}}],["codesuccess",{"2":{"55":1,"181":1}}],["codeempty",{"2":{"55":1,"181":1}}],["code",{"0":{"129":1},"1":{"130":1,"131":1,"132":1},"2":{"9":2,"21":3,"24":1,"26":1,"51":1,"52":1,"55":49,"56":1,"64":4,"66":2,"93":1,"104":1,"114":1,"130":13,"131":9,"132":3,"142":11,"164":1,"166":3,"167":12,"174":3,"175":10,"180":1,"181":36,"182":14,"183":94}}],["coalitional",{"2":{"171":1}}],["cot",{"2":{"166":1,"168":1,"174":1}}],["core",{"2":{"127":1,"181":1}}],["corpus",{"2":{"57":1}}],["corresponds",{"2":{"183":2}}],["correspondence",{"2":{"160":1}}],["correspond",{"2":{"58":1,"64":1,"181":1,"182":4}}],["corresponding",{"2":{"0":4,"13":1,"30":1,"31":1,"52":2,"60":2,"62":1,"63":2,"64":4,"75":1,"108":2,"142":1,"152":1,"153":1,"154":1,"171":1,"182":9,"183":25}}],["correctiverag",{"2":{"182":1}}],["correcting",{"2":{"142":1}}],["corrections",{"2":{"140":1}}],["correct",{"2":{"55":4,"93":1,"108":3,"130":1,"142":1,"166":1,"167":4,"168":1,"169":1,"174":1,"175":4,"181":4,"182":1,"183":1}}],["correctly",{"2":{"0":1,"171":3,"183":7}}],["covering",{"2":{"167":1,"175":1}}],["cover",{"2":{"121":1}}],["coversation",{"2":{"55":1,"181":1}}],["coverage",{"0":{"0":1}}],["cosmic",{"2":{"67":1,"183":1}}],["cosinesimilarity",{"2":{"64":2,"182":12,"183":1}}],["cosine",{"2":{"17":2,"47":2,"182":5,"183":4}}],["cost2",{"2":{"183":2}}],["cost1",{"2":{"183":3}}],["costing",{"2":{"183":2}}],["costs",{"2":{"11":1,"64":1,"81":1,"106":1,"182":1,"183":9}}],["cost",{"0":{"82":1},"2":{"4":1,"9":1,"12":1,"14":1,"21":2,"24":1,"26":1,"30":1,"31":1,"58":1,"64":16,"71":2,"72":1,"74":1,"82":1,"92":1,"182":50,"183":76}}],["counds",{"2":{"181":1}}],["counts",{"2":{"182":1}}],["counted",{"2":{"181":1}}],["counter",{"2":{"64":3,"181":2,"182":7}}],["counterpart",{"2":{"52":1,"67":1,"183":1}}],["counterparts",{"2":{"11":1,"22":1,"52":1,"106":1}}],["counting",{"2":{"158":1}}],["country=",{"2":{"72":1}}],["country",{"2":{"71":1,"72":1,"182":4}}],["count",{"2":{"28":1,"37":1,"64":1,"110":1,"112":1,"114":1,"115":1,"117":1,"118":1,"120":1,"121":1,"122":1,"124":1,"125":1,"126":1,"127":1,"128":1,"130":1,"131":1,"132":1,"134":1,"136":1,"137":1,"138":1,"140":1,"141":1,"142":1,"144":1,"145":1,"147":1,"149":1,"150":1,"152":1,"153":1,"154":1,"157":1,"158":1,"159":1,"160":1,"161":1,"162":1,"163":1,"164":1,"165":1,"166":1,"167":1,"168":1,"169":1,"170":1,"171":1,"173":1,"174":1,"175":1,"177":1,"178":1,"182":1,"183":14}}],["couldn",{"2":{"183":1}}],["could",{"2":{"7":1,"8":1,"22":2,"34":1,"54":2,"55":2,"64":2,"67":2,"79":1,"97":1,"108":1,"126":1,"150":1,"181":2,"182":2,"183":7}}],["collects",{"2":{"181":1}}],["collect",{"2":{"181":2,"183":2}}],["collection",{"2":{"9":1,"64":1,"182":1}}],["collaboration",{"2":{"160":1}}],["colorful",{"2":{"177":1,"183":1}}],["colors",{"2":{"64":1,"182":1}}],["color",{"2":{"22":2,"54":2,"55":2,"58":3,"67":1,"181":2,"182":8,"183":7}}],["column",{"2":{"7":2,"182":4,"183":2}}],["columns",{"2":{"6":1,"7":4,"182":1}}],["coherence",{"2":{"183":1}}],["coherereranker",{"2":{"64":1,"182":5,"183":1}}],["cohere",{"2":{"2":1,"8":1,"64":2,"182":19,"183":6}}],["conv",{"2":{"97":1,"98":2,"108":2,"183":36}}],["conventions",{"2":{"183":1}}],["convention",{"2":{"183":1}}],["convenient",{"2":{"183":2}}],["convenience",{"2":{"2":1,"67":2,"182":1,"183":5}}],["convey",{"2":{"100":1,"103":1}}],["conversion",{"2":{"108":1}}],["conversational",{"2":{"183":2}}],["conversation=myconversation",{"2":{"181":1}}],["conversationlabeler",{"0":{"158":1}}],["conversation2",{"2":{"96":1}}],["conversation1",{"2":{"96":1}}],["conversations",{"0":{"13":1,"92":1},"2":{"13"
:1,"35":1,"42":2,"55":1,"58":2,"64":2,"72":1,"96":3,"98":2,"181":1,"182":9,"183":26}}],["conversation",{"2":{"11":2,"13":6,"22":1,"30":1,"35":1,"41":1,"54":1,"55":25,"64":1,"71":1,"92":11,"93":4,"94":1,"96":3,"98":2,"103":2,"106":2,"108":3,"140":2,"141":5,"142":1,"158":6,"181":71,"182":5,"183":240}}],["converts",{"2":{"183":1}}],["converted",{"2":{"183":2}}],["convert",{"2":{"107":2,"108":1,"182":4,"183":13}}],["converting",{"2":{"83":1,"126":1,"128":1}}],["confusion",{"2":{"41":1}}],["confirm",{"2":{"183":2}}],["config=retryconfig",{"2":{"93":1,"108":1}}],["configures",{"2":{"183":1}}],["configure",{"2":{"183":9}}],["configuring",{"0":{"84":1}}],["configuration",{"2":{"64":1,"84":2,"181":1,"182":3,"183":1}}],["configurable",{"2":{"58":1}}],["config",{"2":{"22":2,"52":1,"54":2,"55":9,"64":1,"87":1,"108":2,"181":14,"182":3,"183":1}}],["confidence",{"2":{"11":1,"67":1,"181":2,"183":1}}],["confident",{"2":{"9":3,"105":1,"107":2,"157":1,"161":1,"163":1,"165":1,"170":1,"173":1}}],["connection",{"2":{"35":1}}],["conducted",{"2":{"181":1}}],["cond",{"2":{"22":1,"54":1,"55":6,"181":15}}],["condition=>string",{"2":{"183":1}}],["conditions",{"2":{"183":1}}],["condition",{"2":{"22":4,"54":5,"55":16,"93":3,"108":3,"181":31,"183":19}}],["concatenation",{"2":{"182":1}}],["concatenates",{"2":{"183":1}}],["concatenate",{"2":{"36":1,"55":1,"182":1,"183":1}}],["concentrate",{"2":{"171":1}}],["concepts",{"0":{"100":1},"1":{"101":1,"102":1,"103":1,"104":1,"105":1,"106":1},"2":{"99":1,"100":1,"126":1}}],["concept",{"2":{"17":1,"126":1}}],["conclusion",{"2":{"164":1}}],["conclusions",{"2":{"152":1}}],["conclude",{"2":{"140":1,"142":1,"171":1}}],["concrete",{"2":{"108":1}}],["concurrent",{"2":{"15":1,"79":2}}],["concurrently",{"2":{"15":1,"72":1}}],["concise",{"2":{"9":3,"104":1,"105":1,"107":2,"110":1,"117":1,"118":1,"120":1,"127":1,"130":1,"152":3,"153":2,"156":1,"157":1,"158":1,"159":1,"160":2,"161":1,"163":1,"164":1,"165":1,"166":1,"170":1,"173":1,"174":1,"183":6}}],["contrast",{"2":{"183":1}}],["control",{"2":{"71":1,"83":1,"93":1,"168":1,"169":1,"182":2}}],["controlling",{"2":{"55":1,"181":1}}],["controlled",{"2":{"22":1}}],["contribute",{"2":{"58":1}}],["contribution",{"2":{"52":1,"171":1}}],["continuous",{"2":{"76":1}}],["continued",{"2":{"183":1}}],["continues>",{"2":{"183":5}}],["continue",{"2":{"55":1,"71":1,"181":1,"183":6}}],["continue>",{"2":{"21":1,"183":2}}],["continuing",{"2":{"12":1,"82":1}}],["container",{"2":{"183":1}}],["contained",{"2":{"182":2}}],["containing",{"2":{"11":3,"64":4,"106":3,"181":1,"182":13,"183":7}}],["contain",{"2":{"7":1,"11":1,"106":1,"182":2,"183":5}}],["contains",{"2":{"6":1,"7":8,"9":1,"32":1,"37":1,"58":1,"60":1,"66":1,"104":1,"180":1,"181":1,"182":2,"183":3}}],["contemporary",{"2":{"16":1,"183":1}}],["content=",{"2":{"183":8}}],["contents",{"2":{"182":2}}],["content",{"2":{"6":1,"11":11,"17":3,"20":2,"21":2,"22":1,"23":2,"24":1,"27":1,"31":1,"45":1,"46":1,"47":2,"50":2,"54":1,"55":5,"64":1,"71":2,"93":3,"97":2,"106":11,"107":5,"108":3,"140":3,"152":1,"153":2,"179":2,"181":10,"182":6,"183":132}}],["context=true",{"2":{"182":1}}],["context=",{"2":{"182":4}}],["contexts",{"2":{"181":1}}],["contextual",{"2":{"64":1,"120":1,"182":2}}],["contextenumerator",{"2":{"64":3,"182":11,"183":1}}],["contexter",{"2":{"64":8,"182":14}}],["context",{"2":{"2":2,"5":1,"6":3,"8":3,"11":2,"12":2,"28":1,"49":1,"55":1,"58":6,"60":1,"61":1,"63":5,"64":32,"66":1,"67":9,"103":2,"110":7,"117":11,"118":1,"120":11,"121":8,"122":6,"126":1,"140":1,"150":1,"153":1,"154":1,"171":1,"181":4,"182":103,"1
83":17}}],["consecutive",{"2":{"182":2}}],["conservative",{"2":{"36":1,"183":1}}],["consumed",{"2":{"183":2}}],["consumer",{"2":{"76":1}}],["consuming",{"2":{"3":1}}],["considered",{"2":{"64":1,"67":1,"182":2,"183":1}}],["considering",{"2":{"58":1,"121":1,"182":1,"183":2}}],["consider",{"2":{"9":1,"64":2,"126":1,"140":1,"171":1,"182":2,"183":1}}],["consistent",{"2":{"121":2,"122":1,"158":1,"182":2}}],["consistency",{"2":{"6":1,"36":1,"121":1,"140":1,"182":1,"183":3}}],["consisting",{"2":{"21":1,"183":2}}],["consists",{"2":{"7":1}}],["constant",{"2":{"183":9}}],["constituent",{"2":{"141":1}}],["constrained",{"2":{"183":2}}],["constraints",{"2":{"67":1,"141":1,"183":1}}],["construct",{"2":{"183":1}}],["constructor",{"2":{"183":1}}],["constructive",{"2":{"140":1}}],["constructs",{"2":{"63":1}}],["const",{"2":{"1":2,"9":1,"25":1,"32":1,"37":1,"51":1,"56":1,"87":1,"107":1,"108":1,"183":3}}],["combination",{"2":{"182":2,"183":1}}],["combining",{"2":{"56":1}}],["combines",{"2":{"64":2,"182":2}}],["combined",{"2":{"7":1,"182":1}}],["combine",{"2":{"5":1,"6":2,"7":5}}],["com",{"2":{"21":1,"49":2,"67":1,"80":1,"112":1,"179":1,"182":5,"183":16}}],["comes",{"2":{"96":1,"182":1}}],["come",{"2":{"14":1,"183":3}}],["commas",{"2":{"182":1}}],["commands",{"2":{"67":4,"70":1,"183":4}}],["command",{"2":{"9":1,"28":1,"67":1,"103":1,"183":1}}],["comments",{"2":{"82":1,"130":1,"131":1,"183":1}}],["comment",{"2":{"82":1,"117":1,"118":1,"167":1,"175":1}}],["commercial",{"2":{"82":1}}],["commit",{"2":{"69":1,"84":1}}],["communicates",{"2":{"140":1}}],["communications",{"2":{"160":2}}],["communication",{"2":{"9":3,"100":1,"102":1,"104":1,"105":1,"107":2,"157":1,"159":1,"160":3,"161":1,"163":1,"165":1,"166":1,"170":1,"173":1,"174":1,"181":1,"183":1}}],["community",{"2":{"9":1,"58":1}}],["commun",{"2":{"9":1,"14":1,"183":2}}],["commonly",{"2":{"183":1}}],["common",{"2":{"1":1,"7":2,"66":7,"67":17,"93":1,"96":1,"181":1,"183":20}}],["compelling",{"2":{"164":1,"171":1}}],["complicated",{"2":{"93":2,"108":1}}],["complicity",{"2":{"67":1,"183":1}}],["completions",{"2":{"183":5}}],["completions`",{"2":{"183":1}}],["completion",{"2":{"183":7}}],["completeling",{"2":{"183":1}}],["completely",{"2":{"64":1,"66":1,"182":2}}],["completeness",{"2":{"6":1,"121":1,"141":1}}],["complete",{"2":{"5":2,"6":1,"7":2,"121":1}}],["complement",{"2":{"180":1}}],["complex",{"2":{"13":1,"17":1,"18":1,"20":1,"22":1,"58":1,"72":1,"181":2,"183":6}}],["compact",{"2":{"167":1,"175":1,"182":1}}],["compass",{"2":{"94":2,"183":2}}],["company",{"2":{"67":2,"91":1,"183":2}}],["companion",{"2":{"66":1}}],["comparable",{"2":{"83":1}}],["comparing",{"2":{"67":1,"183":1}}],["comparison",{"2":{"58":2}}],["compared",{"2":{"182":1}}],["compare",{"2":{"66":1,"67":2,"181":1,"183":2}}],["compatibility",{"2":{"42":1,"183":6}}],["compatible",{"0":{"24":1,"27":1},"2":{"0":2,"24":2,"25":1,"27":1,"183":2}}],["computes",{"2":{"182":1}}],["compute",{"2":{"67":1,"182":1,"183":1}}],["computer",{"2":{"23":1,"24":1}}],["computational",{"2":{"58":1}}],["computing",{"2":{"58":8,"64":1,"182":6}}],["comprehensively",{"2":{"120":1,"167":1,"175":1}}],["comprehensive",{"2":{"58":2,"150":1,"152":1}}],["comprehension",{"0":{"21":1}}],["composite",{"2":{"182":2}}],["composes",{"2":{"183":1}}],["compose",{"2":{"98":1,"120":1}}],["composed",{"2":{"7":1,"183":3}}],["components",{"2":{"62":2,"64":1,"182":3}}],["component",{"2":{"55":1,"62":1,"64":1,"181":2,"182":2}}],["compiled",{"2":{"64":1,"78":3,"182":1}}],["compile",{"2":{"9":1,"28":1,"78":1}}]],"serializationVersion":2}'; -export { 
- _localSearchIndexroot as default -}; diff --git a/previews/PR218/assets/chunks/VPLocalSearchBox.CYisIjwo.js b/previews/PR218/assets/chunks/VPLocalSearchBox.DERldJmv.js similarity index 99% rename from previews/PR218/assets/chunks/VPLocalSearchBox.CYisIjwo.js rename to previews/PR218/assets/chunks/VPLocalSearchBox.DERldJmv.js index 8caa69081..754998a9e 100644 --- a/previews/PR218/assets/chunks/VPLocalSearchBox.CYisIjwo.js +++ b/previews/PR218/assets/chunks/VPLocalSearchBox.DERldJmv.js @@ -2,8 +2,8 @@ var __defProp = Object.defineProperty; var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value; var __publicField = (obj, key, value) => __defNormalProp(obj, typeof key !== "symbol" ? key + "" : key, value); import { V as __vitePreload, p as ref, h as computed, aj as toValue, ak as unrefElement, al as notNullish, q as watch, am as tryOnScopeDispose, d as defineComponent, D as shallowRef, an as computedAsync, ao as useSessionStorage, ap as useLocalStorage, s as watchEffect, aq as watchDebounced, v as onMounted, P as nextTick, O as onKeyStroke, ar as useRouter, as as useEventListener, W as useScrollLock, R as inBrowser, $ as onBeforeUnmount, o as openBlock, b as createBlock, j as createBaseVNode, a0 as withModifiers, k as unref, at as withDirectives, au as vModelText, av as isRef, c as createElementBlock, n as normalizeClass, e as createCommentVNode, C as renderList, F as Fragment, a as createTextVNode, t as toDisplayString, aw as Teleport, ax as markRaw, ay as createApp, a9 as dataSymbol, af as pathToFile, az as escapeRegExp, _ as _export_sfc } from "./framework.Dg7-7npA.js"; -import { u as useData, c as createSearchTranslate } from "./theme.ik5QZKRB.js"; -const localSearchIndex = { "root": () => __vitePreload(() => import("./@localSearchIndexroot.Dn3ujldP.js"), true ? [] : void 0) }; +import { u as useData, c as createSearchTranslate } from "./theme.r06NSmXs.js"; +const localSearchIndex = { "root": () => __vitePreload(() => import("./@localSearchIndexroot.CKvFodwz.js"), true ? [] : void 0) }; /*! 
* tabbable 6.2.0 * @license MIT, https://github.com/focus-trap/tabbable/blob/master/LICENSE diff --git a/previews/PR218/assets/chunks/theme.ik5QZKRB.js b/previews/PR218/assets/chunks/theme.r06NSmXs.js similarity index 99% rename from previews/PR218/assets/chunks/theme.ik5QZKRB.js rename to previews/PR218/assets/chunks/theme.r06NSmXs.js index d59c99592..828a3b5c6 100644 --- a/previews/PR218/assets/chunks/theme.ik5QZKRB.js +++ b/previews/PR218/assets/chunks/theme.r06NSmXs.js @@ -1,4 +1,4 @@ -const __vite__mapDeps=(i,m=__vite__mapDeps,d=(m.f||(m.f=["assets/chunks/VPLocalSearchBox.CYisIjwo.js","assets/chunks/framework.Dg7-7npA.js"])))=>i.map(i=>d[i]); +const __vite__mapDeps=(i,m=__vite__mapDeps,d=(m.f||(m.f=["assets/chunks/VPLocalSearchBox.DERldJmv.js","assets/chunks/framework.Dg7-7npA.js"])))=>i.map(i=>d[i]); import { d as defineComponent, o as openBlock, c as createElementBlock, r as renderSlot, n as normalizeClass, a as createTextVNode, t as toDisplayString, b as createBlock, w as withCtx, e as createCommentVNode, T as Transition, _ as _export_sfc, u as useData$1, i as isExternal, f as treatAsHtml, g as withBase, h as computed, j as createBaseVNode, k as unref, l as isActive, m as useMediaQuery, p as ref, q as watch, s as watchEffect, v as onMounted, x as onUnmounted, y as watchPostEffect, z as onUpdated, A as getScrollOffset, B as resolveComponent, F as Fragment, C as renderList, D as shallowRef, E as onContentUpdated, G as createVNode, H as resolveDynamicComponent, I as EXTERNAL_URL_RE, J as useRoute, K as mergeProps, L as inject, M as useWindowSize, N as normalizeStyle, O as onKeyStroke, P as nextTick, Q as useWindowScroll, R as inBrowser, S as readonly, U as defineAsyncComponent, V as __vitePreload, W as useScrollLock, X as provide, Y as withKeys, Z as toHandlers, $ as onBeforeUnmount, a0 as withModifiers, a1 as useSlots, a2 as reactive, a3 as toRef, a4 as h } from "./framework.Dg7-7npA.js"; const _sfc_main$Z = /* @__PURE__ */ defineComponent({ __name: "VPBadge", @@ -2215,7 +2215,7 @@ const _hoisted_3$3 = { const _sfc_main$l = /* @__PURE__ */ defineComponent({ __name: "VPNavBarSearch", setup(__props) { - const VPLocalSearchBox = defineAsyncComponent(() => __vitePreload(() => import("./VPLocalSearchBox.CYisIjwo.js"), true ? __vite__mapDeps([0,1]) : void 0)); + const VPLocalSearchBox = defineAsyncComponent(() => __vitePreload(() => import("./VPLocalSearchBox.DERldJmv.js"), true ? __vite__mapDeps([0,1]) : void 0)); const VPAlgoliaSearchBox = () => null; const { theme: theme2 } = useData(); const loaded = ref(false); diff --git a/previews/PR218/assets/extra_tools_agent_tools_intro.md.cSVCxyyT.js b/previews/PR218/assets/extra_tools_agent_tools_intro.md.C6nMFv4B.js similarity index 98% rename from previews/PR218/assets/extra_tools_agent_tools_intro.md.cSVCxyyT.js rename to previews/PR218/assets/extra_tools_agent_tools_intro.md.C6nMFv4B.js index 592b787ee..d1e50d4b1 100644 --- a/previews/PR218/assets/extra_tools_agent_tools_intro.md.cSVCxyyT.js +++ b/previews/PR218/assets/extra_tools_agent_tools_intro.md.C6nMFv4B.js @@ -3,7 +3,7 @@ const __pageData = JSON.parse('{"title":"Agent Tools Introduction","description" const _sfc_main = { name: "extra_tools/agent_tools_intro.md" }; function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { return openBlock(), createElementBlock("div", null, _cache[0] || (_cache[0] = [ - createStaticVNode('

Agent Tools Introduction

AgentTools is an experimental module that provides a set of utilities for building advanced agentic workflows, code-generating and self-fixing agents.

Import the module as follows:

julia
using PromptingTools.Experimental.AgentTools\n# to access unexported functionality\nconst AT = PromptingTools.Experimental.AgentTools

Highlights

The main functions to be aware of are:

The main contribution of this module is providing the "lazy" counterparts to the ai... functions, which allow us to build a workflow that can be re-executed many times with the same inputs.

For example, AIGenerate() will create a lazy instance of aigenerate, which is an instance of AICall with aigenerate as its ai-calling function. It uses exactly the same arguments and keyword arguments as aigenerate (see ?aigenerate for details). The notion of "lazy" refers to the fact that it does NOT generate any output when instantiated (only when run! is called).

Or said differently, the AICall struct and all its flavors (AIGenerate, ...) are designed to facilitate a deferred execution model (lazy evaluation) for AI functions that interact with a Large Language Model (LLM). It stores the necessary information for an AI call and executes the underlying AI function only when supplied with a UserMessage or when the run! method is applied. This allows us to remember user inputs and trigger the LLM call repeatedly if needed, which enables automatic fixing (see ?airetry!).
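
Below is a minimal sketch of this lazy pattern (the prompt text is purely illustrative):

julia
aicall = AIGenerate("What is 1+1?")  # lazy: nothing is sent to the LLM yet
run!(aicall)                         # the model is called only now
last_output(aicall)                  # inspect the generated answer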

Examples

Automatic Fixing of AI Calls

We need to switch from aigenerate to AIGenerate to get the lazy version of the function.

julia
output = AIGenerate("Say hi!"; model="gpt4t") |> run!

How is it useful? We can use the same "inputs" for repeated calls, eg, when we want to validate or regenerate some outputs. We have a function airetry! to help us with that.

The signature of airetry! is airetry!(condition_function, aicall::AICall, feedback_function).

It evaluates the condition condition_function on the aicall object (eg, we evaluate f_cond(aicall) -> Bool). If it fails, we call feedback_function on the aicall object to provide feedback for the AI model (eg, f_feedback(aicall) -> String) and repeat the process until it passes or until max_retries value is exceeded.

We can catch API failures (no feedback needed, so none is provided)

julia
# API failure because of a non-existent model\n# RetryConfig allows us to change the "retry" behaviour of any lazy call\noutput = AIGenerate("say hi!"; config = RetryConfig(; catch_errors = true),\n    model = "NOTEXIST")\nrun!(output) # fails\n\n# we ask to wait 2s between retries and retry 2 times (can be set in `config` in aicall as well)\nairetry!(isvalid, output; retry_delay = 2, max_retries = 2)

Or we can use it for output validation (eg, its format, its content, etc.) and feedback generation.

Let's play a color guessing game (I'm thinking "yellow"). We'll implement two formatting checks with airetry!:

julia
# Notice that we ask for two samples (`n_samples=2`) at each attempt (to improve our chances). \n# Both guesses are scored at each time step, and the best one is chosen for the next step.\n# And with OpenAI, we can set `api_kwargs = (;n=2)` to get both samples simultaneously (cheaper and faster)!\nout = AIGenerate(\n    "Guess what color I'm thinking. It could be: blue, red, black, white, yellow. Answer with 1 word only";\n    verbose = false,\n    config = RetryConfig(; n_samples = 2), api_kwargs = (; n = 2))\nrun!(out)\n\n## Check that the output is 1 word only, third argument is the feedback that will be provided if the condition fails\n## Notice: functions operate on `aicall` as the only argument. We can use utilities like `last_output` and `last_message` to access the last message and output in the conversation.\nairetry!(x -> length(split(last_output(x), r" |\\\\.")) == 1, out,\n    "You must answer with 1 word only.")\n\n# Note: you could also use the do-syntax, eg, \nairetry!(out, "You must answer with 1 word only.") do aicall\n    length(split(last_output(aicall), r" |\\\\.")) == 1\nend

You can even add the guessing itself as an airetry! condition of last_output(out) == "yellow" and provide feedback if the guess is wrong.
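
For instance, a minimal sketch of that final check (the feedback text is just an illustration):

julia
airetry!(x -> last_output(x) == "yellow", out,
    "Wrong color guessed. Pick another one of the allowed colors.")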

References

# PromptingTools.Experimental.AgentTools.AIGenerateFunction.
julia
AIGenerate(args...; kwargs...)

Creates a lazy instance of aigenerate. It is an instance of AICall with aigenerate as the function.

Use exactly the same arguments and keyword arguments as aigenerate (see ?aigenerate for details).

source


# PromptingTools.Experimental.AgentTools.AICallType.
julia
AICall(func::F, args...; kwargs...) where {F<:Function}\n\nAIGenerate(args...; kwargs...)\nAIEmbed(args...; kwargs...)\nAIExtract(args...; kwargs...)

A lazy call wrapper for AI functions in the PromptingTools module, such as aigenerate.

The AICall struct is designed to facilitate a deferred execution model (lazy evaluation) for AI functions that interact with a Large Language Model (LLM). It stores the necessary information for an AI call and executes the underlying AI function only when supplied with a UserMessage or when the run! method is applied. This approach allows for more flexible and efficient handling of AI function calls, especially in interactive environments.

See also: run!, AICodeFixer

Fields

Example

Initiate an AICall like any ai* function, eg, AIGenerate:

julia
aicall = AICall(aigenerate)\n\n# With arguments and kwargs like ai* functions\n# from `aigenerate(schema, conversation; model="abc", api_kwargs=(; temperature=0.1))`\n# to\naicall = AICall(aigenerate, schema, conversation; model="abc", api_kwargs=(; temperature=0.1)\n\n# Or with a template\naicall = AIGenerate(:JuliaExpertAsk; ask="xyz", model="abc", api_kwargs=(; temperature=0.1))

Trigger the AICall with run! (it returns the updated AICall struct back):

julia
aicall |> run!

You can also use `AICall` as a functor to trigger the AI call with a `UserMessage` or simply the text to send:

julia
aicall(UserMessage("Hello, world!")) # Triggers the lazy call
result = run!(aicall) # Explicitly runs the AI call

This can be used to "reply" to a previous message / continue the stored conversation

Notes

source


# PromptingTools.last_outputFunction.

Extracts the last output (generated text answer) from the RAGResult.

source

Helpful accessor for AICall blocks. Returns the last output in the conversation (eg, the string/data in the last message).

source

Helpful accessor for the last generated output (msg.content) in conversation. Returns the last output in the conversation (eg, the string/data in the last message).

source


# PromptingTools.last_messageFunction.
julia
PT.last_message(result::RAGResult)

Extract the last message from the RAGResult. It looks for final_answer first, then answer fields in the conversations dictionary. Returns nothing if not found.

source

Helpful accessor for AICall blocks. Returns the last message in the conversation.

source

Helpful accessor for the last message in conversation. Returns the last message in the conversation.

source
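
A minimal usage sketch of these two accessors (assuming an API key is configured; the prompt is illustrative):

julia
conv = aigenerate("Say hi!"; return_all = true)  # returns the whole conversation vector
last_message(conv)  # the final AIMessage in the conversation
last_output(conv)   # its content, ie, the generated text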


# PromptingTools.Experimental.AgentTools.airetry!Function.
julia
airetry!(\n    f_cond::Function, aicall::AICallBlock, feedback::Union{AbstractString, Function} = "";\n    verbose::Bool = true, throw::Bool = false, evaluate_all::Bool = true, feedback_expensive::Bool = false,\n    max_retries::Union{Nothing, Int} = nothing, retry_delay::Union{Nothing, Int} = nothing)

Evaluates the condition f_cond on the aicall object. If the condition is not met, it will return the best sample to retry from and provide feedback (string or function) to aicall. That's why it's mutating. It will retry a maximum of max_retries times; with throw=true, an error will be thrown if the condition is not met after max_retries retries.

Note: aicall must be run first via run!(aicall) before calling airetry!.

Function signatures

You can leverage the last_message, last_output, and AICode functions to access the last message, last output and execute code blocks in the conversation, respectively. See examples below.

Good Use Cases

Gotchas

Arguments

Returns

Example

You can use airetry! to catch API errors in run! and auto-retry the call. RetryConfig is how you influence all the subsequent retry behaviours - see ?RetryConfig for more details.

julia
# API failure because of a non-existent model\nout = AIGenerate("say hi!"; config = RetryConfig(; catch_errors = true),\n    model = "NOTEXIST")\nrun!(out) # fails\n\n# we ask to wait 2s between retries and retry 2 times (can be set in `config` in aicall as well)\nairetry!(isvalid, out; retry_delay = 2, max_retries = 2)

If you provide arguments to the aicall, we try to honor them as much as possible in the following calls, eg, set low verbosity

julia
out = AIGenerate("say hi!"; config = RetryConfig(; catch_errors = true),\nmodel = "NOTEXIST", verbose=false)\nrun!(out)\n# No info message, you just see `success = false` in the properties of the AICall

Let's show a toy example to demonstrate the runtime checks / guardrails for the model output. We'll play a color guessing game (I'm thinking "yellow"):

julia
# Notice that we ask for two samples (`n_samples=2`) at each attempt (to improve our chances). \n# Both guesses are scored at each time step, and the best one is chosen for the next step.\n# And with OpenAI, we can set `api_kwargs = (;n=2)` to get both samples simultaneously (cheaper and faster)!\nout = AIGenerate(\n    "Guess what color I'm thinking. It could be: blue, red, black, white, yellow. Answer with 1 word only";\n    verbose = false,\n    config = RetryConfig(; n_samples = 2), api_kwargs = (; n = 2))\nrun!(out)\n\n\n## Check that the output is 1 word only, third argument is the feedback that will be provided if the condition fails\n## Notice: functions operate on `aicall` as the only argument. We can use utilities like `last_output` and `last_message` to access the last message and output in the conversation.\nairetry!(x -> length(split(last_output(x), r" |\\.")) == 1, out,\n    "You must answer with 1 word only.")\n\n\n## Let's ensure that the output is in lowercase - simple and short\nairetry!(x -> all(islowercase, last_output(x)), out, "You must answer in lowercase.")\n# [ Info: Condition not met. Retrying...\n\n\n## Let's add final hint - it took us 2 retries\nairetry!(x -> startswith(last_output(x), "y"), out, "It starts with "y"")\n# [ Info: Condition not met. Retrying...\n# [ Info: Condition not met. Retrying...\n\n\n## We end up with the correct answer\nlast_output(out)\n# Output: "yellow"

Let's explore how we got here. We save the various attempts in a "tree" (a SampleNode object). You can access it in out.samples, which is the ROOT of the tree (top level). The currently "active" sample ID is out.active_sample_id -> that's the same as the conversation field in your AICall.

julia
# Root node:\nout.samples\n# Output: SampleNode(id: 46839, stats: 6/12, length: 2)\n\n# Active sample (our correct answer):\nout.active_sample_id \n# Output: 50086\n\n# Let's obtain the active sample node with this ID  - use getindex notation or function find_node\nout.samples[out.active_sample_id]\n# Output: SampleNode(id: 50086, stats: 1/1, length: 7)\n\n# The SampleNode has two key fields: data and feedback. Data is where the conversation is stored:\nactive_sample = out.samples[out.active_sample_id]\nactive_sample.data == out.conversation # Output: true -> This is the winning guess!

We also get a clear view of the tree structure of all samples with print_samples:

julia
julia> print_samples(out.samples)\nSampleNode(id: 46839, stats: 6/12, score: 0.5, length: 2)\n├─ SampleNode(id: 12940, stats: 5/8, score: 1.41, length: 4)\n│  ├─ SampleNode(id: 34315, stats: 3/4, score: 1.77, length: 6)\n│  │  ├─ SampleNode(id: 20493, stats: 1/1, score: 2.67, length: 7)\n│  │  └─ SampleNode(id: 50086, stats: 1/1, score: 2.67, length: 7)\n│  └─ SampleNode(id: 2733, stats: 1/2, score: 1.94, length: 5)\n└─ SampleNode(id: 48343, stats: 1/4, score: 1.36, length: 4)\n   ├─ SampleNode(id: 30088, stats: 0/1, score: 1.67, length: 5)\n   └─ SampleNode(id: 44816, stats: 0/1, score: 1.67, length: 5)

You can use the id to grab and inspect any of these nodes, eg,

julia
out.samples[2733]\n# Output: SampleNode(id: 2733, stats: 1/2, length: 5)

We can also iterate through all samples and extract whatever information we want with PostOrderDFS or PreOrderDFS (exported from AbstractTrees.jl)

julia
for sample in PostOrderDFS(out.samples)\n    # Data is the universal field for samples, we put `conversation` in there\n    # Last item in data is the last message in conversation\n    msg = sample.data[end]\n    if msg isa PT.AIMessage # skip feedback\n        # get only the message content, ie, the guess\n        println("ID: $(sample.id), Answer: $(msg.content)")\n    end\nend\n\n# ID: 20493, Answer: yellow\n# ID: 50086, Answer: yellow\n# ID: 2733, Answer: red\n# ID: 30088, Answer: blue\n# ID: 44816, Answer: blue

Note: airetry! will attempt to fix the model max_retries times. If you set throw=true, it will throw an ErrorException if the condition is not met after max_retries retries.
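
For example, a short sketch of the throw=true behaviour (condition and feedback are illustrative):

julia
# Throws an ErrorException if the condition still fails after 3 retries
airetry!(x -> last_output(x) == "yellow", out, "Guess again, 1 word only.";
    throw = true, max_retries = 3)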

Let's define a mini program to guess the number and use airetry! to guide the model to the correct answer:

julia
"""\n    llm_guesser()\n\nMini program to guess the number provided by the user (betwee 1-100).\n"""\nfunction llm_guesser(user_number::Int)\n    @assert 1 <= user_number <= 100\n    prompt = """\nI'm thinking a number between 1-100. Guess which one it is. \nYou must respond only with digits and nothing else. \nYour guess:"""\n    ## 2 samples at a time, max 5 fixing rounds\n    out = AIGenerate(prompt; config = RetryConfig(; n_samples = 2, max_retries = 5),\n        api_kwargs = (; n = 2)) |> run!\n    ## Check the proper output format - must parse to Int, use do-syntax\n    ## We can provide feedback via a function!\n    function feedback_f(aicall)\n        "Output: $(last_output(aicall))\nFeedback: You must respond only with digits!!"\n    end\n    airetry!(out, feedback_f) do aicall\n        !isnothing(tryparse(Int, last_output(aicall)))\n    end\n    ## Give a hint on bounds\n    lower_bound = (user_number ÷ 10) * 10\n    upper_bound = lower_bound + 10\n    airetry!(\n        out, "The number is between or equal to $lower_bound to $upper_bound.") do aicall\n        guess = tryparse(Int, last_output(aicall))\n        lower_bound <= guess <= upper_bound\n    end\n    ## You can make at most 3x guess now -- if there is max_retries in `config.max_retries` left\n    max_retries = out.config.retries + 3\n    function feedback_f2(aicall)\n        guess = tryparse(Int, last_output(aicall))\n        "Your guess of $(guess) is wrong, it's $(abs(guess-user_number)) numbers away."\n    end\n    airetry!(out, feedback_f2; max_retries) do aicall\n        tryparse(Int, last_output(aicall)) == user_number\n    end\n\n    ## Evaluate the best guess\n    @info "Results: Guess: $(last_output(out)) vs User: $user_number (Number of calls made: $(out.config.calls))"\n    return out\nend\n\n# Let's play the game\nout = llm_guesser(33)\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Results: Guess: 33 vs User: 33 (Number of calls made: 10)

Yay! We got it 😃

Now, we could explore different samples (eg, print_samples(out.samples)) or see what the model guessed at each step:

julia
print_samples(out.samples)\n## SampleNode(id: 57694, stats: 6/14, score: 0.43, length: 2)\n## ├─ SampleNode(id: 35603, stats: 5/10, score: 1.23, length: 4)\n## │  ├─ SampleNode(id: 55394, stats: 1/4, score: 1.32, length: 6)\n## │  │  ├─ SampleNode(id: 20737, stats: 0/1, score: 1.67, length: 7)\n## │  │  └─ SampleNode(id: 52910, stats: 0/1, score: 1.67, length: 7)\n## │  └─ SampleNode(id: 43094, stats: 3/4, score: 1.82, length: 6)\n## │     ├─ SampleNode(id: 14966, stats: 1/1, score: 2.67, length: 7)\n## │     └─ SampleNode(id: 32991, stats: 1/1, score: 2.67, length: 7)\n## └─ SampleNode(id: 20506, stats: 1/4, score: 1.4, length: 4)\n##    ├─ SampleNode(id: 37581, stats: 0/1, score: 1.67, length: 5)\n##    └─ SampleNode(id: 46632, stats: 0/1, score: 1.67, length: 5)\n\n# Lastly, let's check all the guesses AI made across all samples. \n# Our winning guess was ID 32991 (`out.active_sample_id`)\n\nfor sample in PostOrderDFS(out.samples)\n    [println("ID: $(sample.id), Guess: $(msg.content)")\n     for msg in sample.data if msg isa PT.AIMessage]\nend\n## ID: 20737, Guess: 50\n## ID: 20737, Guess: 35\n## ID: 20737, Guess: 37\n## ID: 52910, Guess: 50\n## ID: 52910, Guess: 35\n## ID: 52910, Guess: 32\n## ID: 14966, Guess: 50\n## ID: 14966, Guess: 35\n## ID: 14966, Guess: 33\n## ID: 32991, Guess: 50\n## ID: 32991, Guess: 35\n## ID: 32991, Guess: 33\n## etc...

Note that if there are multiple "branches", the model will see only its own feedback and that of its ancestors, not the other "branches". If you want to provide ALL feedback, set RetryConfig(; n_samples=1) to remove any "branching". The fixing will then be done sequentially in one conversation and the model will see all the feedback (less powerful if the model falls into a bad state). Alternatively, you can tweak the feedback function.
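
A small sketch of the "no branching" setup (prompt text is illustrative):

julia
# n_samples = 1 keeps a single conversation, so the model sees all accumulated feedback
out = AIGenerate("Guess what color I'm thinking. Answer with 1 word only";
    config = RetryConfig(; n_samples = 1))
run!(out)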

See Also

References: airetry is inspired by the Language Agent Tree Search paper and by DSPy Assertions paper.

source


# PromptingTools.Experimental.AgentTools.print_samplesFunction.

Pretty prints the samples tree starting from node. Usually, node is the root of the tree. Example: print_samples(aicall.samples).

source


# PromptingTools.AICodeType.
julia
AICode(code::AbstractString; auto_eval::Bool=true, safe_eval::Bool=false, \nskip_unsafe::Bool=false, capture_stdout::Bool=true, verbose::Bool=false,\nprefix::AbstractString="", suffix::AbstractString="", remove_tests::Bool=false, execution_timeout::Int = 60)\n\nAICode(msg::AIMessage; auto_eval::Bool=true, safe_eval::Bool=false, \nskip_unsafe::Bool=false, skip_invalid::Bool=false, capture_stdout::Bool=true,\nverbose::Bool=false, prefix::AbstractString="", suffix::AbstractString="", remove_tests::Bool=false, execution_timeout::Int = 60)

A mutable structure representing a code block (received from the AI model) with automatic parsing, execution, and output/error capturing capabilities.

Upon instantiation with a string, the AICode object automatically runs a code parser and executor (via PromptingTools.eval!()), capturing any standard output (stdout) or errors. This structure is useful for programmatically handling and evaluating Julia code snippets.

See also: PromptingTools.extract_code_blocks, PromptingTools.eval!

Workflow

Properties

Keyword Arguments

Methods

Examples

julia
code = AICode("println(\"Hello, World!\")") # Auto-parses and evaluates the code, capturing output and errors.\nisvalid(code) # Output: true\ncode.stdout # Output: "Hello, World!\n"

We try to evaluate "safely" by default (eg, inside a custom module, to avoid changing user variables). You can avoid that with safe_eval=false:

julia
code = AICode("new_variable = 1"; safe_eval=false)\nisvalid(code) # Output: true\nnew_variable # Output: 1

You can also call AICode directly on an AIMessage, which will extract the Julia code blocks, concatenate them and evaluate them:

julia
msg = aigenerate("In Julia, how do you create a vector of 10 random numbers?")\ncode = AICode(msg)\n# Output: AICode(Success: True, Parsed: True, Evaluated: True, Error Caught: N/A, StdOut: True, Code: 2 Lines)\n\n# show the code\ncode.code |> println\n# Output: \n# numbers = rand(10)\n# numbers = rand(1:100, 10)\n\n# or copy it to the clipboard\ncode.code |> clipboard\n\n# or execute it in the current module (=Main)\neval(code.expression)

source


# PromptingTools.Experimental.AgentTools.aicodefixer_feedbackFunction.
julia
aicodefixer_feedback(cb::AICode; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(conversation::AbstractVector{<:PT.AbstractMessage}; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(msg::PT.AIMessage; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(aicall::AICall; max_length::Int = 512) -> NamedTuple(; feedback::String)

Generate feedback for an AI code fixing session based on the AICode block /or conversation history (that will be used to extract and evaluate a code block). Function is designed to be extensible for different types of feedback and code evaluation outcomes.

The highlevel wrapper accepts a conversation and returns new kwargs for the AICall.

Individual feedback functions are dispatched on different subtypes of AbstractCodeOutcome and can be extended/overwritten to provide more detailed feedback.

See also: AIGenerate, AICodeFixer

Arguments

Returns

Example

julia
cb = AICode(msg; skip_unsafe = true, capture_stdout = true)\nnew_kwargs = aicodefixer_feedback(cb)\n\nnew_kwargs = aicodefixer_feedback(msg)\nnew_kwargs = aicodefixer_feedback(conversation)

Notes

This function is part of the AI code fixing system, intended to interact with code in AIMessage and provide feedback on improving it.

The highlevel wrapper accepts a conversation and returns new kwargs for the AICall.

It dispatches for the code feedback based on the subtypes of AbstractCodeOutcome below:

You can override the individual methods to customize the feedback.

source


# PromptingTools.Experimental.AgentTools.error_feedbackFunction.
julia
error_feedback(e::Any; max_length::Int = 512)

Set of specialized methods to provide feedback on different types of errors (e).

source
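
A minimal usage sketch (the error is fabricated purely for the demonstration; AT is the module alias defined above):

julia
err = try
    sqrt(-1)  # raises a DomainError
catch e
    e
end
AT.error_feedback(err; max_length = 512)  # short, model-friendly description of the error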


', 42) + createStaticVNode('

Agent Tools Introduction

AgentTools is an experimental module that provides a set of utilities for building advanced agentic workflows, code-generating and self-fixing agents.

Import the module as follows:

julia
using PromptingTools.Experimental.AgentTools\n# to access unexported functionality\nconst AT = PromptingTools.Experimental.AgentTools

Highlights

The main functions to be aware of are:

The main contribution of this module is providing the "lazy" counterparts to the ai... functions, which allow us to build a workflow that can be re-executed many times with the same inputs.

For example, AIGenerate() will create a lazy instance of aigenerate, which is an instance of AICall with aigenerate as its ai-calling function. It uses exactly the same arguments and keyword arguments as aigenerate (see ?aigenerate for details). The notion of "lazy" refers to the fact that it does NOT generate any output when instantiated (only when run! is called).

Or said differently, the AICall struct and all its flavors (AIGenerate, ...) are designed to facilitate a deferred execution model (lazy evaluation) for AI functions that interact with a Large Language Model (LLM). It stores the necessary information for an AI call and executes the underlying AI function only when supplied with a UserMessage or when the run! method is applied. This allows us to remember user inputs and trigger the LLM call repeatedly if needed, which enables automatic fixing (see ?airetry!).

Examples

Automatic Fixing of AI Calls

We need to switch from aigenerate to AIGenerate to get the lazy version of the function.

julia
output = AIGenerate("Say hi!"; model="gpt4t") |> run!

How is it useful? We can use the same "inputs" for repeated calls, eg, when we want to validate or regenerate some outputs. We have a function airetry! to help us with that.

The signature of airetry! is airetry!(condition_function, aicall::AICall, feedback_function).

It evaluates the condition condition_function on the aicall object (eg, we evaluate f_cond(aicall) -> Bool). If it fails, we call feedback_function on the aicall object to provide feedback for the AI model (eg, f_feedback(aicall) -> String) and repeat the process until it passes or until max_retries value is exceeded.

We can catch API failures (no feedback needed, so none is provided)

julia
# API failure because of a non-existent model\n# RetryConfig allows us to change the "retry" behaviour of any lazy call\noutput = AIGenerate("say hi!"; config = RetryConfig(; catch_errors = true),\n    model = "NOTEXIST")\nrun!(output) # fails\n\n# we ask to wait 2s between retries and retry 2 times (can be set in `config` in aicall as well)\nairetry!(isvalid, output; retry_delay = 2, max_retries = 2)

Or we can use it for output validation (eg, its format, its content, etc.) and feedback generation.

Let's play a color guessing game (I'm thinking "yellow"). We'll implement two formatting checks with airetry!:

julia
# Notice that we ask for two samples (`n_samples=2`) at each attempt (to improve our chances). \n# Both guesses are scored at each time step, and the best one is chosen for the next step.\n# And with OpenAI, we can set `api_kwargs = (;n=2)` to get both samples simultaneously (cheaper and faster)!\nout = AIGenerate(\n    "Guess what color I'm thinking. It could be: blue, red, black, white, yellow. Answer with 1 word only";\n    verbose = false,\n    config = RetryConfig(; n_samples = 2), api_kwargs = (; n = 2))\nrun!(out)\n\n## Check that the output is 1 word only, third argument is the feedback that will be provided if the condition fails\n## Notice: functions operate on `aicall` as the only argument. We can use utilities like `last_output` and `last_message` to access the last message and output in the conversation.\nairetry!(x -> length(split(last_output(x), r" |\\\\.")) == 1, out,\n    "You must answer with 1 word only.")\n\n# Note: you could also use the do-syntax, eg, \nairetry!(out, "You must answer with 1 word only.") do aicall\n    length(split(last_output(aicall), r" |\\\\.")) == 1\nend

You can even add the guessing itself as an airetry! condition of last_output(out) == "yellow" and provide feedback if the guess is wrong.

References

# PromptingTools.Experimental.AgentTools.AIGenerateFunction.
julia
AIGenerate(args...; kwargs...)

Creates a lazy instance of aigenerate. It is an instance of AICall with aigenerate as the function.

Use exactly the same arguments and keyword arguments as aigenerate (see ?aigenerate for details).

source


# PromptingTools.Experimental.AgentTools.AICallType.
julia
AICall(func::F, args...; kwargs...) where {F<:Function}\n\nAIGenerate(args...; kwargs...)\nAIEmbed(args...; kwargs...)\nAIExtract(args...; kwargs...)

A lazy call wrapper for AI functions in the PromptingTools module, such as aigenerate.

The AICall struct is designed to facilitate a deferred execution model (lazy evaluation) for AI functions that interact with a Large Language Model (LLM). It stores the necessary information for an AI call and executes the underlying AI function only when supplied with a UserMessage or when the run! method is applied. This approach allows for more flexible and efficient handling of AI function calls, especially in interactive environments.

See also: run!, AICodeFixer

Fields

Example

Initiate an AICall like any ai* function, eg, AIGenerate:

julia
aicall = AICall(aigenerate)\n\n# With arguments and kwargs like ai* functions\n# from `aigenerate(schema, conversation; model="abc", api_kwargs=(; temperature=0.1))`\n# to\naicall = AICall(aigenerate, schema, conversation; model="abc", api_kwargs=(; temperature=0.1)\n\n# Or with a template\naicall = AIGenerate(:JuliaExpertAsk; ask="xyz", model="abc", api_kwargs=(; temperature=0.1))

Trigger the AICall with run! (it returns the updated AICall struct back):

julia
aicall |> run!

You can also use `AICall` as a functor to trigger the AI call with a `UserMessage` or simply the text to send:

julia
aicall(UserMessage("Hello, world!")) # Triggers the lazy call
result = run!(aicall) # Explicitly runs the AI call

This can be used to "reply" to a previous message / continue the stored conversation

Notes

source


# PromptingTools.last_outputFunction.

Extracts the last output (generated text answer) from the RAGResult.

source

Helpful accessor for AICall blocks. Returns the last output in the conversation (eg, the string/data in the last message).

source

Helpful accessor for the last generated output (msg.content) in conversation. Returns the last output in the conversation (eg, the string/data in the last message).

source


# PromptingTools.last_messageFunction.
julia
PT.last_message(result::RAGResult)

Extract the last message from the RAGResult. It looks for final_answer first, then answer fields in the conversations dictionary. Returns nothing if not found.

source

Helpful accessor for AICall blocks. Returns the last message in the conversation.

source

Helpful accessor for the last message in conversation. Returns the last message in the conversation.

source


# PromptingTools.Experimental.AgentTools.airetry!Function.
julia
airetry!(\n    f_cond::Function, aicall::AICallBlock, feedback::Union{AbstractString, Function} = "";\n    verbose::Bool = true, throw::Bool = false, evaluate_all::Bool = true, feedback_expensive::Bool = false,\n    max_retries::Union{Nothing, Int} = nothing, retry_delay::Union{Nothing, Int} = nothing)

Evaluates the condition f_cond on the aicall object. If the condition is not met, it will return the best sample to retry from and provide feedback (string or function) to aicall. That's why it's mutating. It will retry a maximum of max_retries times; with throw=true, an error will be thrown if the condition is not met after max_retries retries.

Note: aicall must be run first via run!(aicall) before calling airetry!.

Function signatures

You can leverage the last_message, last_output, and AICode functions to access the last message, last output and execute code blocks in the conversation, respectively. See examples below.

Good Use Cases

Gotchas

Arguments

Returns

Example

You can use airetry! to catch API errors in run! and auto-retry the call. RetryConfig is how you influence all the subsequent retry behaviours - see ?RetryConfig for more details.

julia
# API failure because of a non-existent model\nout = AIGenerate("say hi!"; config = RetryConfig(; catch_errors = true),\n    model = "NOTEXIST")\nrun!(out) # fails\n\n# we ask to wait 2s between retries and retry 2 times (can be set in `config` in aicall as well)\nairetry!(isvalid, out; retry_delay = 2, max_retries = 2)

If you provide arguments to the aicall, we try to honor them as much as possible in the following calls, eg, set low verbosity

julia
out = AIGenerate("say hi!"; config = RetryConfig(; catch_errors = true),\nmodel = "NOTEXIST", verbose=false)\nrun!(out)\n# No info message, you just see `success = false` in the properties of the AICall

Let's show a toy example to demonstrate the runtime checks / guardrails for the model output. We'll play a color guessing game (I'm thinking "yellow"):

julia
# Notice that we ask for two samples (`n_samples=2`) at each attempt (to improve our chances). \n# Both guesses are scored at each time step, and the best one is chosen for the next step.\n# And with OpenAI, we can set `api_kwargs = (;n=2)` to get both samples simultaneously (cheaper and faster)!\nout = AIGenerate(\n    "Guess what color I'm thinking. It could be: blue, red, black, white, yellow. Answer with 1 word only";\n    verbose = false,\n    config = RetryConfig(; n_samples = 2), api_kwargs = (; n = 2))\nrun!(out)\n\n\n## Check that the output is 1 word only, third argument is the feedback that will be provided if the condition fails\n## Notice: functions operate on `aicall` as the only argument. We can use utilities like `last_output` and `last_message` to access the last message and output in the conversation.\nairetry!(x -> length(split(last_output(x), r" |\\.")) == 1, out,\n    "You must answer with 1 word only.")\n\n\n## Let's ensure that the output is in lowercase - simple and short\nairetry!(x -> all(islowercase, last_output(x)), out, "You must answer in lowercase.")\n# [ Info: Condition not met. Retrying...\n\n\n## Let's add final hint - it took us 2 retries\nairetry!(x -> startswith(last_output(x), "y"), out, "It starts with "y"")\n# [ Info: Condition not met. Retrying...\n# [ Info: Condition not met. Retrying...\n\n\n## We end up with the correct answer\nlast_output(out)\n# Output: "yellow"

Let's explore how we got here. We save the various attempts in a "tree" (a SampleNode object). You can access it in out.samples, which is the ROOT of the tree (top level). The currently "active" sample ID is out.active_sample_id -> that's the same as the conversation field in your AICall.

julia
# Root node:\nout.samples\n# Output: SampleNode(id: 46839, stats: 6/12, length: 2)\n\n# Active sample (our correct answer):\nout.active_sample_id \n# Output: 50086\n\n# Let's obtain the active sample node with this ID  - use getindex notation or function find_node\nout.samples[out.active_sample_id]\n# Output: SampleNode(id: 50086, stats: 1/1, length: 7)\n\n# The SampleNode has two key fields: data and feedback. Data is where the conversation is stored:\nactive_sample = out.samples[out.active_sample_id]\nactive_sample.data == out.conversation # Output: true -> This is the winning guess!

We also get a clear view of the tree structure of all samples with print_samples:

julia
julia> print_samples(out.samples)\nSampleNode(id: 46839, stats: 6/12, score: 0.5, length: 2)\n├─ SampleNode(id: 12940, stats: 5/8, score: 1.41, length: 4)\n│  ├─ SampleNode(id: 34315, stats: 3/4, score: 1.77, length: 6)\n│  │  ├─ SampleNode(id: 20493, stats: 1/1, score: 2.67, length: 7)\n│  │  └─ SampleNode(id: 50086, stats: 1/1, score: 2.67, length: 7)\n│  └─ SampleNode(id: 2733, stats: 1/2, score: 1.94, length: 5)\n└─ SampleNode(id: 48343, stats: 1/4, score: 1.36, length: 4)\n   ├─ SampleNode(id: 30088, stats: 0/1, score: 1.67, length: 5)\n   └─ SampleNode(id: 44816, stats: 0/1, score: 1.67, length: 5)

You can use the id to grab and inspect any of these nodes, eg,

julia
out.samples[2733]\n# Output: SampleNode(id: 2733, stats: 1/2, length: 5)

We can also iterate through all samples and extract whatever information we want with PostOrderDFS or PreOrderDFS (exported from AbstractTrees.jl)

julia
for sample in PostOrderDFS(out.samples)\n    # Data is the universal field for samples, we put `conversation` in there\n    # Last item in data is the last message in conversation\n    msg = sample.data[end]\n    if msg isa PT.AIMessage # skip feedback\n        # get only the message content, ie, the guess\n        println("ID: $(sample.id), Answer: $(msg.content)")\n    end\nend\n\n# ID: 20493, Answer: yellow\n# ID: 50086, Answer: yellow\n# ID: 2733, Answer: red\n# ID: 30088, Answer: blue\n# ID: 44816, Answer: blue

Note: airetry! will attempt to fix the model max_retries times. If you set throw=true, it will throw an ErrorException if the condition is not met after max_retries retries.

Let's define a mini program to guess the number and use airetry! to guide the model to the correct answer:

julia
"""\n    llm_guesser()\n\nMini program to guess the number provided by the user (betwee 1-100).\n"""\nfunction llm_guesser(user_number::Int)\n    @assert 1 <= user_number <= 100\n    prompt = """\nI'm thinking a number between 1-100. Guess which one it is. \nYou must respond only with digits and nothing else. \nYour guess:"""\n    ## 2 samples at a time, max 5 fixing rounds\n    out = AIGenerate(prompt; config = RetryConfig(; n_samples = 2, max_retries = 5),\n        api_kwargs = (; n = 2)) |> run!\n    ## Check the proper output format - must parse to Int, use do-syntax\n    ## We can provide feedback via a function!\n    function feedback_f(aicall)\n        "Output: $(last_output(aicall))\nFeedback: You must respond only with digits!!"\n    end\n    airetry!(out, feedback_f) do aicall\n        !isnothing(tryparse(Int, last_output(aicall)))\n    end\n    ## Give a hint on bounds\n    lower_bound = (user_number ÷ 10) * 10\n    upper_bound = lower_bound + 10\n    airetry!(\n        out, "The number is between or equal to $lower_bound to $upper_bound.") do aicall\n        guess = tryparse(Int, last_output(aicall))\n        lower_bound <= guess <= upper_bound\n    end\n    ## You can make at most 3x guess now -- if there is max_retries in `config.max_retries` left\n    max_retries = out.config.retries + 3\n    function feedback_f2(aicall)\n        guess = tryparse(Int, last_output(aicall))\n        "Your guess of $(guess) is wrong, it's $(abs(guess-user_number)) numbers away."\n    end\n    airetry!(out, feedback_f2; max_retries) do aicall\n        tryparse(Int, last_output(aicall)) == user_number\n    end\n\n    ## Evaluate the best guess\n    @info "Results: Guess: $(last_output(out)) vs User: $user_number (Number of calls made: $(out.config.calls))"\n    return out\nend\n\n# Let's play the game\nout = llm_guesser(33)\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Results: Guess: 33 vs User: 33 (Number of calls made: 10)

Yay! We got it 😃

Now, we could explore different samples (eg, print_samples(out.samples)) or see what the model guessed at each step:

julia
print_samples(out.samples)\n## SampleNode(id: 57694, stats: 6/14, score: 0.43, length: 2)\n## ├─ SampleNode(id: 35603, stats: 5/10, score: 1.23, length: 4)\n## │  ├─ SampleNode(id: 55394, stats: 1/4, score: 1.32, length: 6)\n## │  │  ├─ SampleNode(id: 20737, stats: 0/1, score: 1.67, length: 7)\n## │  │  └─ SampleNode(id: 52910, stats: 0/1, score: 1.67, length: 7)\n## │  └─ SampleNode(id: 43094, stats: 3/4, score: 1.82, length: 6)\n## │     ├─ SampleNode(id: 14966, stats: 1/1, score: 2.67, length: 7)\n## │     └─ SampleNode(id: 32991, stats: 1/1, score: 2.67, length: 7)\n## └─ SampleNode(id: 20506, stats: 1/4, score: 1.4, length: 4)\n##    ├─ SampleNode(id: 37581, stats: 0/1, score: 1.67, length: 5)\n##    └─ SampleNode(id: 46632, stats: 0/1, score: 1.67, length: 5)\n\n# Lastly, let's check all the guesses AI made across all samples. \n# Our winning guess was ID 32991 (`out.active_sample_id`)\n\nfor sample in PostOrderDFS(out.samples)\n    [println("ID: $(sample.id), Guess: $(msg.content)")\n     for msg in sample.data if msg isa PT.AIMessage]\nend\n## ID: 20737, Guess: 50\n## ID: 20737, Guess: 35\n## ID: 20737, Guess: 37\n## ID: 52910, Guess: 50\n## ID: 52910, Guess: 35\n## ID: 52910, Guess: 32\n## ID: 14966, Guess: 50\n## ID: 14966, Guess: 35\n## ID: 14966, Guess: 33\n## ID: 32991, Guess: 50\n## ID: 32991, Guess: 35\n## ID: 32991, Guess: 33\n## etc...

Note that if there are multiple "branches", the model will see only its own feedback and that of its ancestors, not the other "branches". If you want to provide ALL feedback, set RetryConfig(; n_samples=1) to remove any "branching". The fixing will then be done sequentially in one conversation and the model will see all feedback (less powerful if the model falls into a bad state). Alternatively, you can tweak the feedback function.

See Also

References: airetry is inspired by the Language Agent Tree Search paper and by DSPy Assertions paper.

source


# PromptingTools.Experimental.AgentTools.print_samplesFunction.

Pretty prints the samples tree starting from node. Usually, node is the root of the tree. Example: print_samples(aicall.samples).

source


# PromptingTools.AICodeType.
julia
AICode(code::AbstractString; auto_eval::Bool=true, safe_eval::Bool=false, \nskip_unsafe::Bool=false, capture_stdout::Bool=true, verbose::Bool=false,\nprefix::AbstractString="", suffix::AbstractString="", remove_tests::Bool=false, execution_timeout::Int = 60)\n\nAICode(msg::AIMessage; auto_eval::Bool=true, safe_eval::Bool=false, \nskip_unsafe::Bool=false, skip_invalid::Bool=false, capture_stdout::Bool=true,\nverbose::Bool=false, prefix::AbstractString="", suffix::AbstractString="", remove_tests::Bool=false, execution_timeout::Int = 60)

A mutable structure representing a code block (received from the AI model) with automatic parsing, execution, and output/error capturing capabilities.

Upon instantiation with a string, the AICode object automatically runs a code parser and executor (via PromptingTools.eval!()), capturing any standard output (stdout) or errors. This structure is useful for programmatically handling and evaluating Julia code snippets.

See also: PromptingTools.extract_code_blocks, PromptingTools.eval!

Workflow

Properties

Keyword Arguments

Methods

Examples

julia
code = AICode("println("Hello, World!")") # Auto-parses and evaluates the code, capturing output and errors.\nisvalid(code) # Output: true\ncode.stdout # Output: "Hello, World!\n"

We try to evaluate "safely" by default (eg, inside a custom module, to avoid changing user variables). You can avoid that with save_eval=false:

julia
code = AICode("new_variable = 1"; safe_eval=false)\nisvalid(code) # Output: true\nnew_variable # Output: 1

You can also call AICode directly on an AIMessage, which will extract the Julia code blocks, concatenate them and evaluate them:

julia
msg = aigenerate("In Julia, how do you create a vector of 10 random numbers?")\ncode = AICode(msg)\n# Output: AICode(Success: True, Parsed: True, Evaluated: True, Error Caught: N/A, StdOut: True, Code: 2 Lines)\n\n# show the code\ncode.code |> println\n# Output: \n# numbers = rand(10)\n# numbers = rand(1:100, 10)\n\n# or copy it to the clipboard\ncode.code |> clipboard\n\n# or execute it in the current module (=Main)\neval(code.expression)

source


# PromptingTools.Experimental.AgentTools.aicodefixer_feedbackFunction.
julia
aicodefixer_feedback(cb::AICode; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(conversation::AbstractVector{<:PT.AbstractMessage}; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(msg::PT.AIMessage; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(aicall::AICall; max_length::Int = 512) -> NamedTuple(; feedback::String)

Generate feedback for an AI code-fixing session based on the AICode block and/or the conversation history (which will be used to extract and evaluate a code block). The function is designed to be extensible for different types of feedback and code evaluation outcomes.

The high-level wrapper accepts a conversation and returns new kwargs for the AICall.

Individual feedback functions are dispatched on different subtypes of AbstractCodeOutcome and can be extended/overwritten to provide more detailed feedback.

See also: AIGenerate, AICodeFixer

Arguments

Returns

Example

julia
cb = AICode(msg; skip_unsafe = true, capture_stdout = true)\nnew_kwargs = aicodefixer_feedback(cb)\n\nnew_kwargs = aicodefixer_feedback(msg)\nnew_kwargs = aicodefixer_feedback(conversation)

Notes

This function is part of the AI code fixing system, intended to interact with code in AIMessage and provide feedback on improving it.

The high-level wrapper accepts a conversation and returns new kwargs for the AICall.

It dispatches for the code feedback based on the subtypes of AbstractCodeOutcome below:

You can override the individual methods to customize the feedback.

source


# PromptingTools.Experimental.AgentTools.error_feedbackFunction.
julia
error_feedback(e::Any; max_length::Int = 512)

Set of specialized methods to provide feedback on different types of errors (e).
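
A minimal sketch of how it might be used (the caught error is just an example, and the unexported AT alias assumes the module was imported as shown in the introduction):

julia
e = try\n    sqrt(-1) # throws a DomainError\ncatch err\n    err\nend\nfeedback = AT.error_feedback(e; max_length = 256)\n# `feedback` should be a short String describing the error, limited by `max_length`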

source


', 42) ])); } const agent_tools_intro = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render]]); diff --git a/previews/PR218/assets/extra_tools_agent_tools_intro.md.cSVCxyyT.lean.js b/previews/PR218/assets/extra_tools_agent_tools_intro.md.C6nMFv4B.lean.js similarity index 98% rename from previews/PR218/assets/extra_tools_agent_tools_intro.md.cSVCxyyT.lean.js rename to previews/PR218/assets/extra_tools_agent_tools_intro.md.C6nMFv4B.lean.js index 592b787ee..d1e50d4b1 100644 --- a/previews/PR218/assets/extra_tools_agent_tools_intro.md.cSVCxyyT.lean.js +++ b/previews/PR218/assets/extra_tools_agent_tools_intro.md.C6nMFv4B.lean.js @@ -3,7 +3,7 @@ const __pageData = JSON.parse('{"title":"Agent Tools Introduction","description" const _sfc_main = { name: "extra_tools/agent_tools_intro.md" }; function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { return openBlock(), createElementBlock("div", null, _cache[0] || (_cache[0] = [ - createStaticVNode('

Agent Tools Introduction

AgentTools is an experimental module that provides a set of utilities for building advanced agentic workflows, code-generating and self-fixing agents.

Import the module as follows:

julia
using PromptingTools.Experimental.AgentTools\n# to access unexported functionality\nconst AT = PromptingTools.Experimental.AgentTools

Highlights

The main functions to be aware of are:

The main contribution of this module is providing the "lazy" counterparts to the ai... functions, which allow us to build a workflow that can be re-executed many times with the same inputs.

For example, AIGenerate() will create a lazy instance of aigenerate, which is an instance of AICall with aigenerate as its ai-calling function. It uses exactly the same arguments and keyword arguments as aigenerate (see ?aigenerate for details). The notion of "lazy" refers to the fact that it does NOT generate any output when instantiated (only when run! is called).

Or said differently, the AICall struct and all its flavors (AIGenerate, ...) are designed to facilitate a deferred execution model (lazy evaluation) for AI functions that interact with a Large Language Model (LLM). It stores the necessary information for an AI call and executes the underlying AI function only when supplied with a UserMessage or when the run! method is applied. This allows us to remember user inputs and trigger the LLM call repeatedly if needed, which enables automatic fixing (see ?airetry!).

Examples

Automatic Fixing of AI Calls

We need to switch from aigenerate to AIGenerate to get the lazy version of the function.

julia
output = AIGenerate("Say hi!"; model="gpt4t") |> run!

How is it useful? We can use the same "inputs" for repeated calls, eg, when we want to validate or regenerate some outputs. We have a function airetry! to help us with that.

The signature of airetry! is airetry!(condition_function, aicall::AICall, feedback_function).

It evaluates the condition condition_function on the aicall object (eg, we evaluate f_cond(aicall) -> Bool). If it fails, we call feedback_function on the aicall object to provide feedback for the AI model (eg, f_feedback(aicall) -> String) and repeat the process until it passes or until max_retries value is exceeded.

We can catch API failures (no feedback needed, so none is provided)

julia
# API failure because of a non-existent model\n# RetryConfig allows us to change the "retry" behaviour of any lazy call\noutput = AIGenerate("say hi!"; config = RetryConfig(; catch_errors = true),\n    model = "NOTEXIST")\nrun!(output) # fails\n\n# we ask to wait 2s between retries and retry 2 times (can be set in `config` in aicall as well)\nairetry!(isvalid, output; retry_delay = 2, max_retries = 2)

Or we can use it for output validation (eg, its format, its content, etc.) and feedback generation.

Let's play a color guessing game (I'm thinking "yellow"). We'll implement two formatting checks with airetry!:

julia
# Notice that we ask for two samples (`n_samples=2`) at each attempt (to improve our chances). \n# Both guesses are scored at each time step, and the best one is chosen for the next step.\n# And with OpenAI, we can set `api_kwargs = (;n=2)` to get both samples simultaneously (cheaper and faster)!\nout = AIGenerate(\n    "Guess what color I'm thinking. It could be: blue, red, black, white, yellow. Answer with 1 word only";\n    verbose = false,\n    config = RetryConfig(; n_samples = 2), api_kwargs = (; n = 2))\nrun!(out)\n\n## Check that the output is 1 word only, third argument is the feedback that will be provided if the condition fails\n## Notice: functions operate on `aicall` as the only argument. We can use utilities like `last_output` and `last_message` to access the last message and output in the conversation.\nairetry!(x -> length(split(last_output(x), r" |\\\\.")) == 1, out,\n    "You must answer with 1 word only.")\n\n# Note: you could also use the do-syntax, eg, \nairetry!(out, "You must answer with 1 word only.") do aicall\n    length(split(last_output(aicall), r" |\\\\.")) == 1\nend

You can even add the guessing itself as an airetry! condition of last_output(out) == "yellow" and provide feedback if the guess is wrong.
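
For example, a minimal sketch of that final check (the feedback string here is just illustrative):

julia
airetry!(out, "Wrong color, pick another one from the list.") do aicall\n    last_output(aicall) == "yellow"\nend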

References

# PromptingTools.Experimental.AgentTools.AIGenerateFunction.
julia
AIGenerate(args...; kwargs...)

Creates a lazy instance of aigenerate. It is an instance of AICall with aigenerate as the function.

Use exactly the same arguments and keyword arguments as aigenerate (see ?aigenerate for details).

source


# PromptingTools.Experimental.AgentTools.AICallType.
julia
AICall(func::F, args...; kwargs...) where {F<:Function}\n\nAIGenerate(args...; kwargs...)\nAIEmbed(args...; kwargs...)\nAIExtract(args...; kwargs...)

A lazy call wrapper for AI functions in the PromptingTools module, such as aigenerate.

The AICall struct is designed to facilitate a deferred execution model (lazy evaluation) for AI functions that interact with a Large Language Model (LLM). It stores the necessary information for an AI call and executes the underlying AI function only when supplied with a UserMessage or when the run! method is applied. This approach allows for more flexible and efficient handling of AI function calls, especially in interactive environments.

See also: run!, AICodeFixer

Fields

Example

Initiate an AICall like any ai* function, eg, AIGenerate:

julia
aicall = AICall(aigenerate)\n\n# With arguments and kwargs like ai* functions\n# from `aigenerate(schema, conversation; model="abc", api_kwargs=(; temperature=0.1))`\n# to\naicall = AICall(aigenerate, schema, conversation; model="abc", api_kwargs=(; temperature=0.1)\n\n# Or with a template\naicall = AIGenerate(:JuliaExpertAsk; ask="xyz", model="abc", api_kwargs=(; temperature=0.1))

Trigger the AICall with run! (it returns the updated AICall struct):

julia
aicall |> run!

You can also use AICall as a functor to trigger the AI call with a UserMessage or simply the text to send:

julia
aicall(UserMessage("Hello, world!")) # Triggers the lazy call\nresult = run!(aicall) # Explicitly runs the AI call

This can be used to "reply" to a previous message / continue the stored conversation.

Notes

source


# PromptingTools.last_outputFunction.

Extracts the last output (generated text answer) from the RAGResult.

source

Helpful accessor for AICall blocks. Returns the last output in the conversation (eg, the string/data in the last message).

source

Helpful accessor for the last generated output (msg.content) in conversation. Returns the last output in the conversation (eg, the string/data in the last message).
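
A small sketch of both accessor flavours on a lazy call (the prompt is illustrative; the calls mirror the methods documented here):

julia
out = AIGenerate("Say hi!") |> run!\nlast_output(out)   # the text of the last AI message\nlast_message(out)  # the full last message object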

source


# PromptingTools.last_messageFunction.
julia
PT.last_message(result::RAGResult)

Extract the last message from the RAGResult. It looks for final_answer first, then answer fields in the conversations dictionary. Returns nothing if not found.

source

Helpful accessor for AICall blocks. Returns the last message in the conversation.

source

Helpful accessor for the last message in conversation. Returns the last message in the conversation.

source


# PromptingTools.Experimental.AgentTools.airetry!Function.
julia
airetry!(\n    f_cond::Function, aicall::AICallBlock, feedback::Union{AbstractString, Function} = "";\n    verbose::Bool = true, throw::Bool = false, evaluate_all::Bool = true, feedback_expensive::Bool = false,\n    max_retries::Union{Nothing, Int} = nothing, retry_delay::Union{Nothing, Int} = nothing)

Evaluates the condition f_cond on the aicall object. If the condition is not met, it will return the best sample to retry from and provide feedback (string or function) to aicall. That's why it's mutating. It will retry at most max_retries times; with throw=true, an error will be thrown if the condition is not met after max_retries retries.

Note: aicall must be run first via run!(aicall) before calling airetry!.

Function signatures

You can leverage the last_message, last_output, and AICode functions to access the last message, last output and execute code blocks in the conversation, respectively. See examples below.

Good Use Cases

Gotchas

Arguments

Returns

Example

You can use airetry! to catch API errors in run! and auto-retry the call. RetryConfig is how you influence all the subsequent retry behaviours - see ?RetryConfig for more details.

julia
# API failure because of a non-existent model\nout = AIGenerate("say hi!"; config = RetryConfig(; catch_errors = true),\n    model = "NOTEXIST")\nrun!(out) # fails\n\n# we ask to wait 2s between retries and retry 2 times (can be set in `config` in aicall as well)\nairetry!(isvalid, out; retry_delay = 2, max_retries = 2)

If you provide arguments to the aicall, we try to honor them as much as possible in the following calls, eg, set low verbosity

julia
out = AIGenerate("say hi!"; config = RetryConfig(; catch_errors = true),\nmodel = "NOTEXIST", verbose=false)\nrun!(out)\n# No info message, you just see `success = false` in the properties of the AICall

Let's show a toy example to demonstrate the runtime checks / guardrails for the model output. We'll play a color guessing game (I'm thinking "yellow"):

julia
# Notice that we ask for two samples (`n_samples=2`) at each attempt (to improve our chances). \n# Both guesses are scored at each time step, and the best one is chosen for the next step.\n# And with OpenAI, we can set `api_kwargs = (;n=2)` to get both samples simultaneously (cheaper and faster)!\nout = AIGenerate(\n    "Guess what color I'm thinking. It could be: blue, red, black, white, yellow. Answer with 1 word only";\n    verbose = false,\n    config = RetryConfig(; n_samples = 2), api_kwargs = (; n = 2))\nrun!(out)\n\n\n## Check that the output is 1 word only, third argument is the feedback that will be provided if the condition fails\n## Notice: functions operate on `aicall` as the only argument. We can use utilities like `last_output` and `last_message` to access the last message and output in the conversation.\nairetry!(x -> length(split(last_output(x), r" |\\.")) == 1, out,\n    "You must answer with 1 word only.")\n\n\n## Let's ensure that the output is in lowercase - simple and short\nairetry!(x -> all(islowercase, last_output(x)), out, "You must answer in lowercase.")\n# [ Info: Condition not met. Retrying...\n\n\n## Let's add final hint - it took us 2 retries\nairetry!(x -> startswith(last_output(x), "y"), out, "It starts with "y"")\n# [ Info: Condition not met. Retrying...\n# [ Info: Condition not met. Retrying...\n\n\n## We end up with the correct answer\nlast_output(out)\n# Output: "yellow"

Let's explore how we got here. We save the various attempts in a "tree" (a SampleNode object). You can access it in out.samples, which is the ROOT of the tree (top level). The currently "active" sample ID is out.active_sample_id -> that's the same as the conversation field in your AICall.

julia
# Root node:\nout.samples\n# Output: SampleNode(id: 46839, stats: 6/12, length: 2)\n\n# Active sample (our correct answer):\nout.active_sample_id \n# Output: 50086\n\n# Let's obtain the active sample node with this ID  - use getindex notation or function find_node\nout.samples[out.active_sample_id]\n# Output: SampleNode(id: 50086, stats: 1/1, length: 7)\n\n# The SampleNode has two key fields: data and feedback. Data is where the conversation is stored:\nactive_sample = out.samples[out.active_sample_id]\nactive_sample.data == out.conversation # Output: true -> This is the winning guess!

We also get a clear view of the tree structure of all samples with print_samples:

julia
julia> print_samples(out.samples)\nSampleNode(id: 46839, stats: 6/12, score: 0.5, length: 2)\n├─ SampleNode(id: 12940, stats: 5/8, score: 1.41, length: 4)\n│  ├─ SampleNode(id: 34315, stats: 3/4, score: 1.77, length: 6)\n│  │  ├─ SampleNode(id: 20493, stats: 1/1, score: 2.67, length: 7)\n│  │  └─ SampleNode(id: 50086, stats: 1/1, score: 2.67, length: 7)\n│  └─ SampleNode(id: 2733, stats: 1/2, score: 1.94, length: 5)\n└─ SampleNode(id: 48343, stats: 1/4, score: 1.36, length: 4)\n   ├─ SampleNode(id: 30088, stats: 0/1, score: 1.67, length: 5)\n   └─ SampleNode(id: 44816, stats: 0/1, score: 1.67, length: 5)

You can use the id to grab and inspect any of these nodes, eg,

julia
out.samples[2733]\n# Output: SampleNode(id: 2733, stats: 1/2, length: 5)

We can also iterate through all samples and extract whatever information we want with PostOrderDFS or PreOrderDFS (exported from AbstractTrees.jl)

julia
for sample in PostOrderDFS(out.samples)\n    # Data is the universal field for samples, we put `conversation` in there\n    # Last item in data is the last message in conversation\n    msg = sample.data[end]\n    if msg isa PT.AIMessage # skip feedback\n        # get only the message content, ie, the guess\n        println("ID: $(sample.id), Answer: $(msg.content)")\n    end\nend\n\n# ID: 20493, Answer: yellow\n# ID: 50086, Answer: yellow\n# ID: 2733, Answer: red\n# ID: 30088, Answer: blue\n# ID: 44816, Answer: blue

Note: airetry! will attempt to fix the model's output at most max_retries times. If you set throw=true, it will throw an ErrorException if the condition is not met after max_retries retries.
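
For illustration, here is a minimal sketch of the throwing behaviour, using a hypothetical condition that can never pass so the retries get exhausted (names like out_demo are illustrative only):

julia
# Sketch only: the condition below always fails, which exhausts the retries\nout_demo = AIGenerate("Say hi!"; config = RetryConfig(; max_retries = 2)) |> run!\nairetry!(x -> false, out_demo, "This feedback cannot help."; throw = true)\n# -> raises an ErrorException once max_retries is exhausted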

Let's define a mini program to guess the number and use airetry! to guide the model to the correct answer:

julia
"""\n    llm_guesser()\n\nMini program to guess the number provided by the user (betwee 1-100).\n"""\nfunction llm_guesser(user_number::Int)\n    @assert 1 <= user_number <= 100\n    prompt = """\nI'm thinking a number between 1-100. Guess which one it is. \nYou must respond only with digits and nothing else. \nYour guess:"""\n    ## 2 samples at a time, max 5 fixing rounds\n    out = AIGenerate(prompt; config = RetryConfig(; n_samples = 2, max_retries = 5),\n        api_kwargs = (; n = 2)) |> run!\n    ## Check the proper output format - must parse to Int, use do-syntax\n    ## We can provide feedback via a function!\n    function feedback_f(aicall)\n        "Output: $(last_output(aicall))\nFeedback: You must respond only with digits!!"\n    end\n    airetry!(out, feedback_f) do aicall\n        !isnothing(tryparse(Int, last_output(aicall)))\n    end\n    ## Give a hint on bounds\n    lower_bound = (user_number ÷ 10) * 10\n    upper_bound = lower_bound + 10\n    airetry!(\n        out, "The number is between or equal to $lower_bound to $upper_bound.") do aicall\n        guess = tryparse(Int, last_output(aicall))\n        lower_bound <= guess <= upper_bound\n    end\n    ## You can make at most 3x guess now -- if there is max_retries in `config.max_retries` left\n    max_retries = out.config.retries + 3\n    function feedback_f2(aicall)\n        guess = tryparse(Int, last_output(aicall))\n        "Your guess of $(guess) is wrong, it's $(abs(guess-user_number)) numbers away."\n    end\n    airetry!(out, feedback_f2; max_retries) do aicall\n        tryparse(Int, last_output(aicall)) == user_number\n    end\n\n    ## Evaluate the best guess\n    @info "Results: Guess: $(last_output(out)) vs User: $user_number (Number of calls made: $(out.config.calls))"\n    return out\nend\n\n# Let's play the game\nout = llm_guesser(33)\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Results: Guess: 33 vs User: 33 (Number of calls made: 10)

Yay! We got it 😃

Now, we could explore different samples (eg, print_samples(out.samples)) or see what the model guessed at each step:

julia
print_samples(out.samples)\n## SampleNode(id: 57694, stats: 6/14, score: 0.43, length: 2)\n## ├─ SampleNode(id: 35603, stats: 5/10, score: 1.23, length: 4)\n## │  ├─ SampleNode(id: 55394, stats: 1/4, score: 1.32, length: 6)\n## │  │  ├─ SampleNode(id: 20737, stats: 0/1, score: 1.67, length: 7)\n## │  │  └─ SampleNode(id: 52910, stats: 0/1, score: 1.67, length: 7)\n## │  └─ SampleNode(id: 43094, stats: 3/4, score: 1.82, length: 6)\n## │     ├─ SampleNode(id: 14966, stats: 1/1, score: 2.67, length: 7)\n## │     └─ SampleNode(id: 32991, stats: 1/1, score: 2.67, length: 7)\n## └─ SampleNode(id: 20506, stats: 1/4, score: 1.4, length: 4)\n##    ├─ SampleNode(id: 37581, stats: 0/1, score: 1.67, length: 5)\n##    └─ SampleNode(id: 46632, stats: 0/1, score: 1.67, length: 5)\n\n# Lastly, let's check all the guesses AI made across all samples. \n# Our winning guess was ID 32991 (`out.active_sample_id`)\n\nfor sample in PostOrderDFS(out.samples)\n    [println("ID: $(sample.id), Guess: $(msg.content)")\n     for msg in sample.data if msg isa PT.AIMessage]\nend\n## ID: 20737, Guess: 50\n## ID: 20737, Guess: 35\n## ID: 20737, Guess: 37\n## ID: 52910, Guess: 50\n## ID: 52910, Guess: 35\n## ID: 52910, Guess: 32\n## ID: 14966, Guess: 50\n## ID: 14966, Guess: 35\n## ID: 14966, Guess: 33\n## ID: 32991, Guess: 50\n## ID: 32991, Guess: 35\n## ID: 32991, Guess: 33\n## etc...

Note that if there are multiple "branches", the model will see only its own feedback and that of its ancestors, not the other "branches". If you want to provide ALL feedback, set RetryConfig(; n_samples=1) to remove any "branching". The fixing will then be done sequentially in one conversation and the model will see all feedback (less powerful if the model falls into a bad state). Alternatively, you can tweak the feedback function.

See Also

References: airetry is inspired by the Language Agent Tree Search paper and by DSPy Assertions paper.

source


# PromptingTools.Experimental.AgentTools.print_samplesFunction.

Pretty prints the samples tree starting from node. Usually, node is the root of the tree. Example: print_samples(aicall.samples).

source


# PromptingTools.AICodeType.
julia
AICode(code::AbstractString; auto_eval::Bool=true, safe_eval::Bool=false, \nskip_unsafe::Bool=false, capture_stdout::Bool=true, verbose::Bool=false,\nprefix::AbstractString="", suffix::AbstractString="", remove_tests::Bool=false, execution_timeout::Int = 60)\n\nAICode(msg::AIMessage; auto_eval::Bool=true, safe_eval::Bool=false, \nskip_unsafe::Bool=false, skip_invalid::Bool=false, capture_stdout::Bool=true,\nverbose::Bool=false, prefix::AbstractString="", suffix::AbstractString="", remove_tests::Bool=false, execution_timeout::Int = 60)

A mutable structure representing a code block (received from the AI model) with automatic parsing, execution, and output/error capturing capabilities.

Upon instantiation with a string, the AICode object automatically runs a code parser and executor (via PromptingTools.eval!()), capturing any standard output (stdout) or errors. This structure is useful for programmatically handling and evaluating Julia code snippets.

See also: PromptingTools.extract_code_blocks, PromptingTools.eval!

Workflow

Properties

Keyword Arguments

Methods

Examples

julia
code = AICode("println("Hello, World!")") # Auto-parses and evaluates the code, capturing output and errors.\nisvalid(code) # Output: true\ncode.stdout # Output: "Hello, World!\n"

We try to evaluate "safely" by default (eg, inside a custom module, to avoid changing user variables). You can avoid that with save_eval=false:

julia
code = AICode("new_variable = 1"; safe_eval=false)\nisvalid(code) # Output: true\nnew_variable # Output: 1

You can also call AICode directly on an AIMessage, which will extract the Julia code blocks, concatenate them and evaluate them:

julia
msg = aigenerate("In Julia, how do you create a vector of 10 random numbers?")\ncode = AICode(msg)\n# Output: AICode(Success: True, Parsed: True, Evaluated: True, Error Caught: N/A, StdOut: True, Code: 2 Lines)\n\n# show the code\ncode.code |> println\n# Output: \n# numbers = rand(10)\n# numbers = rand(1:100, 10)\n\n# or copy it to the clipboard\ncode.code |> clipboard\n\n# or execute it in the current module (=Main)\neval(code.expression)

source


# PromptingTools.Experimental.AgentTools.aicodefixer_feedbackFunction.
julia
aicodefixer_feedback(cb::AICode; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(conversation::AbstractVector{<:PT.AbstractMessage}; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(msg::PT.AIMessage; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(aicall::AICall; max_length::Int = 512) -> NamedTuple(; feedback::String)

Generate feedback for an AI code-fixing session based on the AICode block and/or the conversation history (which will be used to extract and evaluate a code block). The function is designed to be extensible for different types of feedback and code evaluation outcomes.

The high-level wrapper accepts a conversation and returns new kwargs for the AICall.

Individual feedback functions are dispatched on different subtypes of AbstractCodeOutcome and can be extended/overwritten to provide more detailed feedback.

See also: AIGenerate, AICodeFixer

Arguments

Returns

Example

julia
cb = AICode(msg; skip_unsafe = true, capture_stdout = true)\nnew_kwargs = aicodefixer_feedback(cb)\n\nnew_kwargs = aicodefixer_feedback(msg)\nnew_kwargs = aicodefixer_feedback(conversation)

Notes

This function is part of the AI code fixing system, intended to interact with code in AIMessage and provide feedback on improving it.

The high-level wrapper accepts a conversation and returns new kwargs for the AICall.

It dispatches for the code feedback based on the subtypes of AbstractCodeOutcome below:

You can override the individual methods to customize the feedback.

source


# PromptingTools.Experimental.AgentTools.error_feedbackFunction.
julia
error_feedback(e::Any; max_length::Int = 512)

Set of specialized methods to provide feedback on different types of errors (e).
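
A minimal sketch of how it might be used (the caught error is just an example, and the unexported AT alias assumes the module was imported as shown in the introduction):

julia
e = try\n    sqrt(-1) # throws a DomainError\ncatch err\n    err\nend\nfeedback = AT.error_feedback(e; max_length = 256)\n# `feedback` should be a short String describing the error, limited by `max_length`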

source


', 42) + createStaticVNode('

Agent Tools Introduction

AgentTools is an experimental module that provides a set of utilities for building advanced agentic workflows, code-generating and self-fixing agents.

Import the module as follows:

julia
using PromptingTools.Experimental.AgentTools\n# to access unexported functionality\nconst AT = PromptingTools.Experimental.AgentTools

Highlights

The main functions to be aware of are:

The main contribution of this module is providing the "lazy" counterparts to the ai... functions, which allow us to build a workflow that can be re-executed many times with the same inputs.

For example, AIGenerate() will create a lazy instance of aigenerate, which is an instance of AICall with aigenerate as its ai-calling function. It uses exactly the same arguments and keyword arguments as aigenerate (see ?aigenerate for details). The notion of "lazy" refers to the fact that it does NOT generate any output when instantiated (only when run! is called).

Or said differently, the AICall struct and all its flavors (AIGenerate, ...) are designed to facilitate a deferred execution model (lazy evaluation) for AI functions that interact with a Large Language Model (LLM). It stores the necessary information for an AI call and executes the underlying AI function only when supplied with a UserMessage or when the run! method is applied. This allows us to remember user inputs and trigger the LLM call repeatedly if needed, which enables automatic fixing (see ?airetry!).

Examples

Automatic Fixing of AI Calls

We need to switch from aigenerate to AIGenerate to get the lazy version of the function.

julia
output = AIGenerate("Say hi!"; model="gpt4t") |> run!

How is it useful? We can use the same "inputs" for repeated calls, eg, when we want to validate or regenerate some outputs. We have a function airetry! to help us with that.

The signature of airetry! is airetry!(condition_function, aicall::AICall, feedback_function).

It evaluates the condition condition_function on the aicall object (eg, we evaluate f_cond(aicall) -> Bool). If it fails, we call feedback_function on the aicall object to provide feedback for the AI model (eg, f_feedback(aicall) -> String) and repeat the process until it passes or until max_retries value is exceeded.

We can catch API failures (no feedback needed, so none is provided)

julia
# API failure because of a non-existent model\n# RetryConfig allows us to change the "retry" behaviour of any lazy call\noutput = AIGenerate("say hi!"; config = RetryConfig(; catch_errors = true),\n    model = "NOTEXIST")\nrun!(output) # fails\n\n# we ask to wait 2s between retries and retry 2 times (can be set in `config` in aicall as well)\nairetry!(isvalid, output; retry_delay = 2, max_retries = 2)

Or we can use it for output validation (eg, its format, its content, etc.) and feedback generation.

Let's play a color guessing game (I'm thinking "yellow"). We'll implement two formatting checks with airetry!:

julia
# Notice that we ask for two samples (`n_samples=2`) at each attempt (to improve our chances). \n# Both guesses are scored at each time step, and the best one is chosen for the next step.\n# And with OpenAI, we can set `api_kwargs = (;n=2)` to get both samples simultaneously (cheaper and faster)!\nout = AIGenerate(\n    "Guess what color I'm thinking. It could be: blue, red, black, white, yellow. Answer with 1 word only";\n    verbose = false,\n    config = RetryConfig(; n_samples = 2), api_kwargs = (; n = 2))\nrun!(out)\n\n## Check that the output is 1 word only, third argument is the feedback that will be provided if the condition fails\n## Notice: functions operate on `aicall` as the only argument. We can use utilities like `last_output` and `last_message` to access the last message and output in the conversation.\nairetry!(x -> length(split(last_output(x), r" |\\\\.")) == 1, out,\n    "You must answer with 1 word only.")\n\n# Note: you could also use the do-syntax, eg, \nairetry!(out, "You must answer with 1 word only.") do aicall\n    length(split(last_output(aicall), r" |\\\\.")) == 1\nend

You can even add the guessing itself as an airetry! condition of last_output(out) == "yellow" and provide feedback if the guess is wrong.
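
For example, a minimal sketch of that final check (the feedback string here is just illustrative):

julia
airetry!(out, "Wrong color, pick another one from the list.") do aicall\n    last_output(aicall) == "yellow"\nend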

References

# PromptingTools.Experimental.AgentTools.AIGenerateFunction.
julia
AIGenerate(args...; kwargs...)

Creates a lazy instance of aigenerate. It is an instance of AICall with aigenerate as the function.

Use exactly the same arguments and keyword arguments as aigenerate (see ?aigenerate for details).

source


# PromptingTools.Experimental.AgentTools.AICallType.
julia
AICall(func::F, args...; kwargs...) where {F<:Function}\n\nAIGenerate(args...; kwargs...)\nAIEmbed(args...; kwargs...)\nAIExtract(args...; kwargs...)

A lazy call wrapper for AI functions in the PromptingTools module, such as aigenerate.

The AICall struct is designed to facilitate a deferred execution model (lazy evaluation) for AI functions that interact with a Large Language Model (LLM). It stores the necessary information for an AI call and executes the underlying AI function only when supplied with a UserMessage or when the run! method is applied. This approach allows for more flexible and efficient handling of AI function calls, especially in interactive environments.

See also: run!, AICodeFixer

Fields

Example

Initiate an AICall like any ai* function, eg, AIGenerate:

julia
aicall = AICall(aigenerate)\n\n# With arguments and kwargs like ai* functions\n# from `aigenerate(schema, conversation; model="abc", api_kwargs=(; temperature=0.1))`\n# to\naicall = AICall(aigenerate, schema, conversation; model="abc", api_kwargs=(; temperature=0.1)\n\n# Or with a template\naicall = AIGenerate(:JuliaExpertAsk; ask="xyz", model="abc", api_kwargs=(; temperature=0.1))

Trigger the AICall with run! (it returns the updated AICall struct):

julia
aicall |> run!

You can also use AICall as a functor to trigger the AI call with a UserMessage or simply the text to send:

julia
aicall(UserMessage("Hello, world!")) # Triggers the lazy call\nresult = run!(aicall) # Explicitly runs the AI call

This can be used to "reply" to a previous message / continue the stored conversation.

Notes

source


# PromptingTools.last_outputFunction.

Extracts the last output (generated text answer) from the RAGResult.

source

Helpful accessor for AICall blocks. Returns the last output in the conversation (eg, the string/data in the last message).

source

Helpful accessor for the last generated output (msg.content) in conversation. Returns the last output in the conversation (eg, the string/data in the last message).
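
A small sketch of both accessor flavours on a lazy call (the prompt is illustrative; the calls mirror the methods documented here):

julia
out = AIGenerate("Say hi!") |> run!\nlast_output(out)   # the text of the last AI message\nlast_message(out)  # the full last message object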

source


# PromptingTools.last_messageFunction.
julia
PT.last_message(result::RAGResult)

Extract the last message from the RAGResult. It looks for final_answer first, then answer fields in the conversations dictionary. Returns nothing if not found.

source

Helpful accessor for AICall blocks. Returns the last message in the conversation.

source

Helpful accessor for the last message in conversation. Returns the last message in the conversation.

source


# PromptingTools.Experimental.AgentTools.airetry!Function.
julia
airetry!(\n    f_cond::Function, aicall::AICallBlock, feedback::Union{AbstractString, Function} = "";\n    verbose::Bool = true, throw::Bool = false, evaluate_all::Bool = true, feedback_expensive::Bool = false,\n    max_retries::Union{Nothing, Int} = nothing, retry_delay::Union{Nothing, Int} = nothing)

Evaluates the condition f_cond on the aicall object. If the condition is not met, it will return the best sample to retry from and provide feedback (string or function) to aicall. That's why it's mutating. It will retry at most max_retries times; with throw=true, an error will be thrown if the condition is not met after max_retries retries.

Note: aicall must be run first via run!(aicall) before calling airetry!.

Function signatures

You can leverage the last_message, last_output, and AICode functions to access the last message, last output and execute code blocks in the conversation, respectively. See examples below.

Good Use Cases

Gotchas

Arguments

Returns

Example

You can use airetry! to catch API errors in run! and auto-retry the call. RetryConfig is how you influence all the subsequent retry behaviours - see ?RetryConfig for more details.

julia
# API failure because of a non-existent model\nout = AIGenerate("say hi!"; config = RetryConfig(; catch_errors = true),\n    model = "NOTEXIST")\nrun!(out) # fails\n\n# we ask to wait 2s between retries and retry 2 times (can be set in `config` in aicall as well)\nairetry!(isvalid, out; retry_delay = 2, max_retries = 2)

If you provide arguments to the aicall, we try to honor them as much as possible in the following calls, eg, set low verbosity

julia
out = AIGenerate("say hi!"; config = RetryConfig(; catch_errors = true),\nmodel = "NOTEXIST", verbose=false)\nrun!(out)\n# No info message, you just see `success = false` in the properties of the AICall

Let's show a toy example to demonstrate the runtime checks / guardrails for the model output. We'll play a color guessing game (I'm thinking "yellow"):

julia
# Notice that we ask for two samples (`n_samples=2`) at each attempt (to improve our chances). \n# Both guesses are scored at each time step, and the best one is chosen for the next step.\n# And with OpenAI, we can set `api_kwargs = (;n=2)` to get both samples simultaneously (cheaper and faster)!\nout = AIGenerate(\n    "Guess what color I'm thinking. It could be: blue, red, black, white, yellow. Answer with 1 word only";\n    verbose = false,\n    config = RetryConfig(; n_samples = 2), api_kwargs = (; n = 2))\nrun!(out)\n\n\n## Check that the output is 1 word only, third argument is the feedback that will be provided if the condition fails\n## Notice: functions operate on `aicall` as the only argument. We can use utilities like `last_output` and `last_message` to access the last message and output in the conversation.\nairetry!(x -> length(split(last_output(x), r" |\\.")) == 1, out,\n    "You must answer with 1 word only.")\n\n\n## Let's ensure that the output is in lowercase - simple and short\nairetry!(x -> all(islowercase, last_output(x)), out, "You must answer in lowercase.")\n# [ Info: Condition not met. Retrying...\n\n\n## Let's add final hint - it took us 2 retries\nairetry!(x -> startswith(last_output(x), "y"), out, "It starts with "y"")\n# [ Info: Condition not met. Retrying...\n# [ Info: Condition not met. Retrying...\n\n\n## We end up with the correct answer\nlast_output(out)\n# Output: "yellow"

Let's explore how we got here. We save the various attempts in a "tree" (a SampleNode object). You can access it in out.samples, which is the ROOT of the tree (top level). The currently "active" sample ID is out.active_sample_id -> that's the same as the conversation field in your AICall.

julia
# Root node:\nout.samples\n# Output: SampleNode(id: 46839, stats: 6/12, length: 2)\n\n# Active sample (our correct answer):\nout.active_sample_id \n# Output: 50086\n\n# Let's obtain the active sample node with this ID  - use getindex notation or function find_node\nout.samples[out.active_sample_id]\n# Output: SampleNode(id: 50086, stats: 1/1, length: 7)\n\n# The SampleNode has two key fields: data and feedback. Data is where the conversation is stored:\nactive_sample = out.samples[out.active_sample_id]\nactive_sample.data == out.conversation # Output: true -> This is the winning guess!

We also get a clear view of the tree structure of all samples with print_samples:

julia
julia> print_samples(out.samples)\nSampleNode(id: 46839, stats: 6/12, score: 0.5, length: 2)\n├─ SampleNode(id: 12940, stats: 5/8, score: 1.41, length: 4)\n│  ├─ SampleNode(id: 34315, stats: 3/4, score: 1.77, length: 6)\n│  │  ├─ SampleNode(id: 20493, stats: 1/1, score: 2.67, length: 7)\n│  │  └─ SampleNode(id: 50086, stats: 1/1, score: 2.67, length: 7)\n│  └─ SampleNode(id: 2733, stats: 1/2, score: 1.94, length: 5)\n└─ SampleNode(id: 48343, stats: 1/4, score: 1.36, length: 4)\n   ├─ SampleNode(id: 30088, stats: 0/1, score: 1.67, length: 5)\n   └─ SampleNode(id: 44816, stats: 0/1, score: 1.67, length: 5)

You can use the id to grab and inspect any of these nodes, eg,

julia
out.samples[2733]\n# Output: SampleNode(id: 2733, stats: 1/2, length: 5)

We can also iterate through all samples and extract whatever information we want with PostOrderDFS or PreOrderDFS (exported from AbstractTrees.jl)

julia
for sample in PostOrderDFS(out.samples)\n    # Data is the universal field for samples, we put `conversation` in there\n    # Last item in data is the last message in conversation\n    msg = sample.data[end]\n    if msg isa PT.AIMessage # skip feedback\n        # get only the message content, ie, the guess\n        println("ID: $(sample.id), Answer: $(msg.content)")\n    end\nend\n\n# ID: 20493, Answer: yellow\n# ID: 50086, Answer: yellow\n# ID: 2733, Answer: red\n# ID: 30088, Answer: blue\n# ID: 44816, Answer: blue

Note: airetry! will attempt to fix the model's output at most max_retries times. If you set throw=true, it will throw an ErrorException if the condition is not met after max_retries retries.
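
For illustration, here is a minimal sketch of the throwing behaviour, using a hypothetical condition that can never pass so the retries get exhausted (names like out_demo are illustrative only):

julia
# Sketch only: the condition below always fails, which exhausts the retries\nout_demo = AIGenerate("Say hi!"; config = RetryConfig(; max_retries = 2)) |> run!\nairetry!(x -> false, out_demo, "This feedback cannot help."; throw = true)\n# -> raises an ErrorException once max_retries is exhausted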

Let's define a mini program to guess the number and use airetry! to guide the model to the correct answer:

julia
"""\n    llm_guesser()\n\nMini program to guess the number provided by the user (betwee 1-100).\n"""\nfunction llm_guesser(user_number::Int)\n    @assert 1 <= user_number <= 100\n    prompt = """\nI'm thinking a number between 1-100. Guess which one it is. \nYou must respond only with digits and nothing else. \nYour guess:"""\n    ## 2 samples at a time, max 5 fixing rounds\n    out = AIGenerate(prompt; config = RetryConfig(; n_samples = 2, max_retries = 5),\n        api_kwargs = (; n = 2)) |> run!\n    ## Check the proper output format - must parse to Int, use do-syntax\n    ## We can provide feedback via a function!\n    function feedback_f(aicall)\n        "Output: $(last_output(aicall))\nFeedback: You must respond only with digits!!"\n    end\n    airetry!(out, feedback_f) do aicall\n        !isnothing(tryparse(Int, last_output(aicall)))\n    end\n    ## Give a hint on bounds\n    lower_bound = (user_number ÷ 10) * 10\n    upper_bound = lower_bound + 10\n    airetry!(\n        out, "The number is between or equal to $lower_bound to $upper_bound.") do aicall\n        guess = tryparse(Int, last_output(aicall))\n        lower_bound <= guess <= upper_bound\n    end\n    ## You can make at most 3x guess now -- if there is max_retries in `config.max_retries` left\n    max_retries = out.config.retries + 3\n    function feedback_f2(aicall)\n        guess = tryparse(Int, last_output(aicall))\n        "Your guess of $(guess) is wrong, it's $(abs(guess-user_number)) numbers away."\n    end\n    airetry!(out, feedback_f2; max_retries) do aicall\n        tryparse(Int, last_output(aicall)) == user_number\n    end\n\n    ## Evaluate the best guess\n    @info "Results: Guess: $(last_output(out)) vs User: $user_number (Number of calls made: $(out.config.calls))"\n    return out\nend\n\n# Let's play the game\nout = llm_guesser(33)\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Results: Guess: 33 vs User: 33 (Number of calls made: 10)

Yay! We got it 😃

Now, we could explore different samples (eg, print_samples(out.samples)) or see what the model guessed at each step:

julia
print_samples(out.samples)\n## SampleNode(id: 57694, stats: 6/14, score: 0.43, length: 2)\n## ├─ SampleNode(id: 35603, stats: 5/10, score: 1.23, length: 4)\n## │  ├─ SampleNode(id: 55394, stats: 1/4, score: 1.32, length: 6)\n## │  │  ├─ SampleNode(id: 20737, stats: 0/1, score: 1.67, length: 7)\n## │  │  └─ SampleNode(id: 52910, stats: 0/1, score: 1.67, length: 7)\n## │  └─ SampleNode(id: 43094, stats: 3/4, score: 1.82, length: 6)\n## │     ├─ SampleNode(id: 14966, stats: 1/1, score: 2.67, length: 7)\n## │     └─ SampleNode(id: 32991, stats: 1/1, score: 2.67, length: 7)\n## └─ SampleNode(id: 20506, stats: 1/4, score: 1.4, length: 4)\n##    ├─ SampleNode(id: 37581, stats: 0/1, score: 1.67, length: 5)\n##    └─ SampleNode(id: 46632, stats: 0/1, score: 1.67, length: 5)\n\n# Lastly, let's check all the guesses AI made across all samples. \n# Our winning guess was ID 32991 (`out.active_sample_id`)\n\nfor sample in PostOrderDFS(out.samples)\n    [println("ID: $(sample.id), Guess: $(msg.content)")\n     for msg in sample.data if msg isa PT.AIMessage]\nend\n## ID: 20737, Guess: 50\n## ID: 20737, Guess: 35\n## ID: 20737, Guess: 37\n## ID: 52910, Guess: 50\n## ID: 52910, Guess: 35\n## ID: 52910, Guess: 32\n## ID: 14966, Guess: 50\n## ID: 14966, Guess: 35\n## ID: 14966, Guess: 33\n## ID: 32991, Guess: 50\n## ID: 32991, Guess: 35\n## ID: 32991, Guess: 33\n## etc...

Note that if there are multiple "branches", the model will see only its own feedback and that of its ancestors, not the other "branches". If you want to provide ALL feedback, set RetryConfig(; n_samples=1) to remove any "branching". The fixing will then be done sequentially in one conversation and the model will see all feedback (less powerful if the model falls into a bad state). Alternatively, you can tweak the feedback function.

See Also

References: airetry is inspired by the Language Agent Tree Search paper and by DSPy Assertions paper.

source


# PromptingTools.Experimental.AgentTools.print_samplesFunction.

Pretty prints the samples tree starting from node. Usually, node is the root of the tree. Example: print_samples(aicall.samples).

source


# PromptingTools.AICodeType.
julia
AICode(code::AbstractString; auto_eval::Bool=true, safe_eval::Bool=false, \nskip_unsafe::Bool=false, capture_stdout::Bool=true, verbose::Bool=false,\nprefix::AbstractString="", suffix::AbstractString="", remove_tests::Bool=false, execution_timeout::Int = 60)\n\nAICode(msg::AIMessage; auto_eval::Bool=true, safe_eval::Bool=false, \nskip_unsafe::Bool=false, skip_invalid::Bool=false, capture_stdout::Bool=true,\nverbose::Bool=false, prefix::AbstractString="", suffix::AbstractString="", remove_tests::Bool=false, execution_timeout::Int = 60)

A mutable structure representing a code block (received from the AI model) with automatic parsing, execution, and output/error capturing capabilities.

Upon instantiation with a string, the AICode object automatically runs a code parser and executor (via PromptingTools.eval!()), capturing any standard output (stdout) or errors. This structure is useful for programmatically handling and evaluating Julia code snippets.

See also: PromptingTools.extract_code_blocks, PromptingTools.eval!

Workflow

Properties

Keyword Arguments

Methods

Examples

julia
code = AICode("println("Hello, World!")") # Auto-parses and evaluates the code, capturing output and errors.\nisvalid(code) # Output: true\ncode.stdout # Output: "Hello, World!\n"

We try to evaluate "safely" by default (eg, inside a custom module, to avoid changing user variables). You can avoid that with save_eval=false:

julia
code = AICode("new_variable = 1"; safe_eval=false)\nisvalid(code) # Output: true\nnew_variable # Output: 1

You can also call AICode directly on an AIMessage, which will extract the Julia code blocks, concatenate them and evaluate them:

julia
msg = aigenerate("In Julia, how do you create a vector of 10 random numbers?")\ncode = AICode(msg)\n# Output: AICode(Success: True, Parsed: True, Evaluated: True, Error Caught: N/A, StdOut: True, Code: 2 Lines)\n\n# show the code\ncode.code |> println\n# Output: \n# numbers = rand(10)\n# numbers = rand(1:100, 10)\n\n# or copy it to the clipboard\ncode.code |> clipboard\n\n# or execute it in the current module (=Main)\neval(code.expression)

source


# PromptingTools.Experimental.AgentTools.aicodefixer_feedbackFunction.
julia
aicodefixer_feedback(cb::AICode; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(conversation::AbstractVector{<:PT.AbstractMessage}; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(msg::PT.AIMessage; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(aicall::AICall; max_length::Int = 512) -> NamedTuple(; feedback::String)

Generate feedback for an AI code-fixing session based on the AICode block and/or the conversation history (which will be used to extract and evaluate a code block). The function is designed to be extensible for different types of feedback and code evaluation outcomes.

The high-level wrapper accepts a conversation and returns new kwargs for the AICall.

Individual feedback functions are dispatched on different subtypes of AbstractCodeOutcome and can be extended/overwritten to provide more detailed feedback.

See also: AIGenerate, AICodeFixer

Arguments

Returns

Example

julia
cb = AICode(msg; skip_unsafe = true, capture_stdout = true)\nnew_kwargs = aicodefixer_feedback(cb)\n\nnew_kwargs = aicodefixer_feedback(msg)\nnew_kwargs = aicodefixer_feedback(conversation)

Notes

This function is part of the AI code fixing system, intended to interact with code in AIMessage and provide feedback on improving it.

The high-level wrapper accepts a conversation and returns new kwargs for the AICall.

It dispatches for the code feedback based on the subtypes of AbstractCodeOutcome below:

You can override the individual methods to customize the feedback.

source


# PromptingTools.Experimental.AgentTools.error_feedbackFunction.
julia
error_feedback(e::Any; max_length::Int = 512)

Set of specialized methods to provide feedback on different types of errors (e).
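
A minimal sketch of how it might be used (the caught error is just an example, and the unexported AT alias assumes the module was imported as shown in the introduction):

julia
e = try\n    sqrt(-1) # throws a DomainError\ncatch err\n    err\nend\nfeedback = AT.error_feedback(e; max_length = 256)\n# `feedback` should be a short String describing the error, limited by `max_length`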

source


', 42) ])); } const agent_tools_intro = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render]]); diff --git a/previews/PR218/assets/extra_tools_api_tools_intro.md.Dbv02Yyd.js b/previews/PR218/assets/extra_tools_api_tools_intro.md.DRt6snGq.js similarity index 99% rename from previews/PR218/assets/extra_tools_api_tools_intro.md.Dbv02Yyd.js rename to previews/PR218/assets/extra_tools_api_tools_intro.md.DRt6snGq.js index 9606e978a..fedc43594 100644 --- a/previews/PR218/assets/extra_tools_api_tools_intro.md.Dbv02Yyd.js +++ b/previews/PR218/assets/extra_tools_api_tools_intro.md.DRt6snGq.js @@ -3,7 +3,7 @@ const __pageData = JSON.parse('{"title":"APITools Introduction","description":"" const _sfc_main = { name: "extra_tools/api_tools_intro.md" }; function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { return openBlock(), createElementBlock("div", null, _cache[0] || (_cache[0] = [ - createStaticVNode('

APITools Introduction

APITools is an experimental module wrapping helpful APIs for working with and enhancing GenerativeAI models.

Import the module as follows:

julia
using PromptingTools.Experimental.APITools

Highlights

Currently, there is only one function in this module, create_websearch, which leverages the Tavily.com search and answer engine to provide additional context.

You need to sign up for an API key at Tavily.com and set it as an environment variable TAVILY_API_KEY to use this function.
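
For example, a small sketch of setting the key for the current session (the placeholder value is an assumption you replace with your own key); you can also pass it explicitly via the documented api_key keyword:

julia
# Set the key for this Julia session only; persist it in your shell profile or startup.jl if needed\nENV["TAVILY_API_KEY"] = "<your-tavily-api-key>"\n\n# or pass it explicitly\nr = create_websearch("Who is King Charles?"; api_key = ENV["TAVILY_API_KEY"])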

References

# PromptingTools.Experimental.APITools.create_websearchFunction.
julia
create_websearch(query::AbstractString;\n    api_key::AbstractString,\n    search_depth::AbstractString = "basic")

Arguments

Example

julia
r = create_websearch("Who is King Charles?")

Even better, you can get not just the results but also the answer:

julia
r = create_websearch("Who is King Charles?"; include_answer = true)

See the REST API documentation for more information.

source


', 10) + createStaticVNode('

APITools Introduction

APITools is an experimental module wrapping helpful APIs for working with and enhancing GenerativeAI models.

Import the module as follows:

julia
using PromptingTools.Experimental.APITools

Highlights

Currently, there is only one function in this module, create_websearch, which leverages the Tavily.com search and answer engine to provide additional context.

You need to sign up for an API key at Tavily.com and set it as an environment variable TAVILY_API_KEY to use this function.
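
For example, a small sketch of setting the key for the current session (the placeholder value is an assumption you replace with your own key); you can also pass it explicitly via the documented api_key keyword:

julia
# Set the key for this Julia session only; persist it in your shell profile or startup.jl if needed\nENV["TAVILY_API_KEY"] = "<your-tavily-api-key>"\n\n# or pass it explicitly\nr = create_websearch("Who is King Charles?"; api_key = ENV["TAVILY_API_KEY"])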

References

# PromptingTools.Experimental.APITools.create_websearchFunction.
julia
create_websearch(query::AbstractString;\n    api_key::AbstractString,\n    search_depth::AbstractString = "basic")

Arguments

Example

julia
r = create_websearch("Who is King Charles?")

Even better, you can get not just the results but also the answer:

julia
r = create_websearch("Who is King Charles?"; include_answer = true)

See the REST API documentation for more information.

source


', 10) ])); } const api_tools_intro = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render]]); diff --git a/previews/PR218/assets/extra_tools_api_tools_intro.md.Dbv02Yyd.lean.js b/previews/PR218/assets/extra_tools_api_tools_intro.md.DRt6snGq.lean.js similarity index 99% rename from previews/PR218/assets/extra_tools_api_tools_intro.md.Dbv02Yyd.lean.js rename to previews/PR218/assets/extra_tools_api_tools_intro.md.DRt6snGq.lean.js index 9606e978a..fedc43594 100644 --- a/previews/PR218/assets/extra_tools_api_tools_intro.md.Dbv02Yyd.lean.js +++ b/previews/PR218/assets/extra_tools_api_tools_intro.md.DRt6snGq.lean.js @@ -3,7 +3,7 @@ const __pageData = JSON.parse('{"title":"APITools Introduction","description":"" const _sfc_main = { name: "extra_tools/api_tools_intro.md" }; function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { return openBlock(), createElementBlock("div", null, _cache[0] || (_cache[0] = [ - createStaticVNode('

APITools Introduction

APITools is an experimental module wrapping helpful APIs for working with and enhancing GenerativeAI models.

Import the module as follows:

julia
using PromptingTools.Experimental.APITools

Highlights

Currently, there is only one function in this module, create_websearch, which leverages the Tavily.com search and answer engine to provide additional context.

You need to sign up for an API key at Tavily.com and set it as an environment variable TAVILY_API_KEY to use this function.
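
For example, a small sketch of setting the key for the current session (the placeholder value is an assumption you replace with your own key); you can also pass it explicitly via the documented api_key keyword:

julia
# Set the key for this Julia session only; persist it in your shell profile or startup.jl if needed\nENV["TAVILY_API_KEY"] = "<your-tavily-api-key>"\n\n# or pass it explicitly\nr = create_websearch("Who is King Charles?"; api_key = ENV["TAVILY_API_KEY"])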

References

# PromptingTools.Experimental.APITools.create_websearchFunction.
julia
create_websearch(query::AbstractString;\n    api_key::AbstractString,\n    search_depth::AbstractString = "basic")

Arguments

Example

julia
r = create_websearch("Who is King Charles?")

Even better, you can get not just the results but also the answer:

julia
r = create_websearch("Who is King Charles?"; include_answer = true)

See the REST API documentation for more information.

source


', 10) + createStaticVNode('

APITools Introduction

APITools is an experimental module wrapping helpful APIs for working with and enhancing GenerativeAI models.

Import the module as follows:

julia
using PromptingTools.Experimental.APITools

Highlights

Currently, there is only one function in this module, create_websearch, which leverages the Tavily.com search and answer engine to provide additional context.

You need to sign up for an API key at Tavily.com and set it as an environment variable TAVILY_API_KEY to use this function.
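
For example, a small sketch of setting the key for the current session (the placeholder value is an assumption you replace with your own key); you can also pass it explicitly via the documented api_key keyword:

julia
# Set the key for this Julia session only; persist it in your shell profile or startup.jl if needed\nENV["TAVILY_API_KEY"] = "<your-tavily-api-key>"\n\n# or pass it explicitly\nr = create_websearch("Who is King Charles?"; api_key = ENV["TAVILY_API_KEY"])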

References

# PromptingTools.Experimental.APITools.create_websearchFunction.
julia
create_websearch(query::AbstractString;\n    api_key::AbstractString,\n    search_depth::AbstractString = "basic")

Arguments

Example

julia
r = create_websearch("Who is King Charles?")

Even better, you can get not just the results but also the answer:

julia
r = create_websearch("Who is King Charles?"; include_answer = true)

See the REST API documentation for more information.

source


', 10) ])); } const api_tools_intro = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render]]); diff --git a/previews/PR218/assets/extra_tools_rag_tools_intro.md.CJjL2hwq.lean.js b/previews/PR218/assets/extra_tools_rag_tools_intro.md.oi28ZdI4.js similarity index 99% rename from previews/PR218/assets/extra_tools_rag_tools_intro.md.CJjL2hwq.lean.js rename to previews/PR218/assets/extra_tools_rag_tools_intro.md.oi28ZdI4.js index f1dfa7877..b778ea87d 100644 --- a/previews/PR218/assets/extra_tools_rag_tools_intro.md.CJjL2hwq.lean.js +++ b/previews/PR218/assets/extra_tools_rag_tools_intro.md.oi28ZdI4.js @@ -5,7 +5,7 @@ const __pageData = JSON.parse('{"title":"RAG Tools Introduction","description":" const _sfc_main = { name: "extra_tools/rag_tools_intro.md" }; function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { return openBlock(), createElementBlock("div", null, _cache[0] || (_cache[0] = [ - createStaticVNode('

RAG Tools Introduction

RAGTools is an experimental module that provides a set of utilities for building Retrieval-Augmented Generation (RAG) applications, ie, applications that generate answers by combining knowledge of the underlying AI model with the information from the user's knowledge base.

It is designed to be powerful and flexible, allowing you to build RAG applications with minimal effort. Extend any step of the pipeline with your own custom code (see the RAG Interface section), or use the provided defaults to get started quickly.

Once the API stabilizes (near term), we hope to carve it out into a separate package.

Import the module as follows:

julia
# required dependencies to load the necessary extensions!!!\nusing LinearAlgebra, SparseArrays, Unicode, Snowball\nusing PromptingTools.Experimental.RAGTools\n# to access unexported functionality\nconst RT = PromptingTools.Experimental.RAGTools

Highlights

The main functions to be aware of are:

The hope is to provide a modular and easily extensible set of tools for building RAG applications in Julia. Feel free to open an issue or ask in the #generative-ai channel in the JuliaLang Slack if you have a specific need.

Examples

Let's build an index; first, we need to provide a starter list of documents:

julia
sentences = [\n    "Find the most comprehensive guide on Julia programming language for beginners published in 2023.",\n    "Search for the latest advancements in quantum computing using Julia language.",\n    "How to implement machine learning algorithms in Julia with examples.",\n    "Looking for performance comparison between Julia, Python, and R for data analysis.",\n    "Find Julia language tutorials focusing on high-performance scientific computing.",\n    "Search for the top Julia language packages for data visualization and their documentation.",\n    "How to set up a Julia development environment on Windows 10.",\n    "Discover the best practices for parallel computing in Julia.",\n    "Search for case studies of large-scale data processing using Julia.",\n    "Find comprehensive resources for mastering metaprogramming in Julia.",\n    "Looking for articles on the advantages of using Julia for statistical modeling.",\n    "How to contribute to the Julia open-source community: A step-by-step guide.",\n    "Find the comparison of numerical accuracy between Julia and MATLAB.",\n    "Looking for the latest Julia language updates and their impact on AI research.",\n    "How to efficiently handle big data with Julia: Techniques and libraries.",\n    "Discover how Julia integrates with other programming languages and tools.",\n    "Search for Julia-based frameworks for developing web applications.",\n    "Find tutorials on creating interactive dashboards with Julia.",\n    "How to use Julia for natural language processing and text analysis.",\n    "Discover the role of Julia in the future of computational finance and econometrics."\n]

Let's index these "documents":

julia
index = build_index(sentences; chunker_kwargs=(; sources=map(i -> "Doc$i", 1:length(sentences))))

This would be equivalent to index = build_index(SimpleIndexer(), sentences), which dispatches to the default implementation of each step via the SimpleIndexer struct. We provide these default implementations for the main functions as an optional argument - there is no need to provide them if you're running the default pipeline.
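
For clarity, here is a sketch of that equivalent explicit call (reusing the sentences and sources from above):

julia
indexer = SimpleIndexer()  # bundles the default chunker, embedder, and tagger\nindex = build_index(indexer, sentences;\n    chunker_kwargs = (; sources = map(i -> "Doc$i", 1:length(sentences))))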

Notice that we have provided a chunker_kwargs argument to the build_index function. These are the kwargs passed to the chunker step.

Now let's generate an answer to a question.

  1. Run end-to-end RAG (retrieve + generate!), return AIMessage
julia
question = "What are the best practices for parallel computing in Julia?"\n\nmsg = airag(index; question) # short for airag(RAGConfig(), index; question)\n## Output:\n## [ Info: Done with RAG. Total cost: \\$0.0\n## AIMessage("Some best practices for parallel computing in Julia include us...
  2. Explore what's happening under the hood by changing the return type - RAGResult contains all intermediate steps.
julia
result = airag(index; question, return_all=true)\n## RAGResult\n##   question: String "What are the best practices for parallel computing in Julia?"\n##   rephrased_questions: Array{String}((1,))\n##   answer: SubString{String}\n##   final_answer: SubString{String}\n##   context: Array{String}((5,))\n##   sources: Array{String}((5,))\n##   emb_candidates: CandidateChunks{Int64, Float32}\n##   tag_candidates: CandidateChunks{Int64, Float32}\n##   filtered_candidates: CandidateChunks{Int64, Float32}\n##   reranked_candidates: CandidateChunks{Int64, Float32}\n##   conversations: Dict{Symbol, Vector{<:PromptingTools.AbstractMessage}}

You can still get the message from the result; see result.conversations[:final_answer] (the dictionary keys correspond to the function names of those steps).
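
For example, a small sketch of pulling that message back out of the result object from above (assuming the final message is the last one in that conversation):

julia
conv = result.conversations[:final_answer]  # full conversation of the answer step\nmsg = last(conv)  # the AIMessage you would otherwise get from `airag` directly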

  3. If you need to customize it, break the pipeline into its sub-steps: retrieve and generate! - RAGResult serves as the intermediate result.
julia
# Retrieve which chunks are relevant to the question\nresult = retrieve(index, question)\n# Generate an answer\nresult = generate!(index, result)

You can leverage the pretty-printing system with pprint, which automatically annotates how well the answer is supported by the chunks we provided to the model. It is configurable, and you can select only some of its features (eg, scores, sources).

julia
pprint(result)

You'll see the following in the REPL, but with color highlighting in the terminal.

plaintext
--------------------\nQUESTION(s)\n--------------------\n- What are the best practices for parallel computing in Julia?\n\n--------------------\nANSWER\n--------------------\nSome of the best practices for parallel computing in Julia include:[1,0.7]\n- Using [3,0.4]`@threads` for simple parallelism[1,0.34]\n- Utilizing `Distributed` module for more complex parallel tasks[1,0.19]\n- Avoiding excessive memory allocation\n- Considering task granularity for efficient workload distribution\n\n--------------------\nSOURCES\n--------------------\n1. Doc8\n2. Doc15\n3. Doc5\n4. Doc2\n5. Doc9

See ?print_html for the HTML version of the pretty-printing and styling system, eg, when you want to display the results in a web application based on Genie.jl/Stipple.jl.

How to read the output

Want more?

See examples/building_RAG.jl for one more example.

RAG Interface

System Overview

This system is designed for information retrieval and response generation, structured in three main phases:

The corresponding functions are build_index, retrieve, and generate!, respectively. Here is the high-level diagram that shows the signature of the main functions:

Notice that the first argument is a custom type for multiple dispatch. In addition, observe the "kwargs" names: that's how the keyword arguments for each function are passed down from the higher-level functions (eg, build_index(...; chunker_kwargs=(; separators=...))). It's the simplest way to customize a step of the pipeline (eg, set a custom model with the model kwarg or a prompt template with the template kwarg).

The system is designed to be hackable and extensible at almost every entry point. If you want to customize the behavior of any step, you can do so by defining a new type and defining a new method for the step you're changing, eg,

julia
using PromptingTools.Experimental.RAGTools: rerank, AbstractReranker\n\nstruct MyReranker <: AbstractReranker end\nrerank(::MyReranker, index, candidates) = ...

And then you would set the retrieve step to use your custom MyReranker via the reranker kwarg, eg, retrieve(...; reranker = MyReranker()) (or customize the main dispatching AbstractRetriever struct).
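
To make that concrete, here is a minimal, hypothetical pass-through reranker; the kwargs... catch-all is an assumption so that any keyword arguments forwarded by the retriever are accepted:

julia
# assumes RAGTools is loaded and aliased as `RT` (see the import snippet above)\nstruct MyReranker <: RT.AbstractReranker end\n# no-op: return the candidates exactly as they were passed in\nRT.rerank(::MyReranker, index, candidates; kwargs...) = candidates\n\n# result = retrieve(RT.SimpleRetriever(), index, question; reranker = MyReranker())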

The overarching principles are:

RAG Diagram

The main functions are:

Prepare your document index with build_index:

Run E2E RAG with airag:

Retrieve relevant chunks with retrieve:

Generate an answer from relevant chunks with generate!:

To discover the currently available implementations, use the subtypes function, eg, subtypes(AbstractReranker).
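
For example (assuming the RT alias from the import snippet above; the exact list depends on the version you have installed):

julia
using InteractiveUtils: subtypes  # already available in the REPL\nsubtypes(RT.AbstractReranker)\nsubtypes(RT.AbstractRephraser)\nsubtypes(RT.AbstractRetriever)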

Passing Keyword Arguments

If you need to pass keyword arguments, use the nested kwargs corresponding to the dispatch type names (eg, the rephrase step has the rephraser dispatch type and rephraser_kwargs for its keyword arguments).

For example:

julia
cfg = RAGConfig(; retriever = AdvancedRetriever())\n\n# kwargs will be big and nested, let's prepare them upfront\n# we specify "custom" model for each component that calls LLM\nkwargs = (\n    retriever = AdvancedRetriever(),\n    retriever_kwargs = (;\n        top_k = 100,\n        top_n = 5,\n        # notice that this is effectively: retriever_kwargs/rephraser_kwargs/template\n        rephraser_kwargs = (;\n            template = :RAGQueryHyDE,\n            model = "custom")),\n    generator_kwargs = (;\n        # pass kwargs to `answer!` step defined by the `answerer` -> we're setting `answerer_kwargs`\n        answerer_kwargs = (;\n            model = "custom"),\n    # api_kwargs can be shared across all components\n    api_kwargs = (;\n        url = "http://localhost:8080")))\n\nresult = airag(cfg, index, question; kwargs...)

If you were one level deeper in the pipeline, working with retriever directly, you would pass:

julia
retriever_kwargs = (;\n    top_k = 100,\n    top_n = 5,\n    # notice that this is effectively: rephraser_kwargs/template\n    rephraser_kwargs = (;\n      template = :RAGQueryHyDE,\n      model = "custom"),\n  # api_kwargs can be shared across all components\n  api_kwargs = (;\n      url = "http://localhost:8080"))\n\nresult = retrieve(AdvancedRetriever(), index, question; retriever_kwargs...)

And going even deeper, you would provide the rephraser_kwargs directly to the rephrase step, eg,

julia
rephrase(SimpleRephraser(), question; model="custom", template = :RAGQueryHyDE, api_kwargs = (; url = "http://localhost:8080"))

Deepdive

Preparation Phase:

Retrieval Phase:

Generation Phase:

Note that all generation steps mutate the RAGResult object.

See more details and corresponding functions and types in src/Experimental/RAGTools/rag_interface.jl.

References

# PromptingTools.Experimental.RAGTools.build_indexFunction.
julia
build_index(\n    indexer::AbstractIndexBuilder, files_or_docs::Vector{<:AbstractString};\n    verbose::Integer = 1,\n    extras::Union{Nothing, AbstractVector} = nothing,\n    index_id = gensym("ChunkEmbeddingsIndex"),\n    chunker::AbstractChunker = indexer.chunker,\n    chunker_kwargs::NamedTuple = NamedTuple(),\n    embedder::AbstractEmbedder = indexer.embedder,\n    embedder_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = indexer.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    api_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Build an INDEX for RAG (Retrieval-Augmented Generation) applications from the provided file paths. INDEX is an object storing the document chunks and their embeddings (and potentially other information).

The function processes each file or document (depending on chunker), splits its content into chunks, embeds these chunks, optionally extracts metadata, and then combines this information into a retrievable index.

Define your own methods via indexer and its subcomponents (chunker, embedder, tagger).

Arguments

Returns

See also: ChunkEmbeddingsIndex, get_chunks, get_embeddings, get_tags, CandidateChunks, find_closest, find_tags, rerank, retrieve, generate!, airag

Examples

julia
# Default is loading a vector of strings and chunking them (`TextChunker()`)\nindex = build_index(SimpleIndexer(), texts; chunker_kwargs = (; max_length=10))\n\n# Another example with tags extraction, splitting only sentences and verbose output\n# Assuming `test_files` is a vector of file paths\nindexer = SimpleIndexer(chunker=FileChunker(), tagger=OpenTagger())\nindex = build_index(indexer, test_files; \n        chunker_kwargs = (; separators=[". "]), verbose=true)

Notes

source

julia
build_index(\n    indexer::KeywordsIndexer, files_or_docs::Vector{<:AbstractString};\n    verbose::Integer = 1,\n    extras::Union{Nothing, AbstractVector} = nothing,\n    index_id = gensym("ChunkKeywordsIndex"),\n    chunker::AbstractChunker = indexer.chunker,\n    chunker_kwargs::NamedTuple = NamedTuple(),\n    processor::AbstractProcessor = indexer.processor,\n    processor_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = indexer.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    api_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Builds a ChunkKeywordsIndex from the provided files or documents to support keyword-based search (BM25).

source


# PromptingTools.Experimental.RAGTools.airagFunction.
julia
airag(cfg::AbstractRAGConfig, index::AbstractDocumentIndex;\n    question::AbstractString,\n    verbose::Integer = 1, return_all::Bool = false,\n    api_kwargs::NamedTuple = NamedTuple(),\n    retriever::AbstractRetriever = cfg.retriever,\n    retriever_kwargs::NamedTuple = NamedTuple(),\n    generator::AbstractGenerator = cfg.generator,\n    generator_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

High-level wrapper for Retrieval-Augmented Generation (RAG). It combines the retrieve and generate! steps, which you can customize if needed.

The simplest version first finds the relevant chunks in index for the question and then sends these chunks to the AI model to help with generating a response to the question.

To customize the components, replace the types (retriever, generator) of the corresponding step of the RAG pipeline - or go into sub-routines within the steps. Eg, use subtypes(AbstractRetriever) to find the available options.

Arguments

Returns

See also build_index, retrieve, generate!, RAGResult, getpropertynested, setpropertynested, merge_kwargs_nested, ChunkKeywordsIndex.

Examples

Using airag to get a response for a question:

julia
index = build_index(...)  # create an index\nquestion = "How to make a barplot in Makie.jl?"\nmsg = airag(index; question)

To understand the details of the RAG process, use return_all=true

julia
msg, details = airag(index; question, return_all = true)\n# details is a RAGDetails object with all the internal steps of the `airag` function

You can also pretty-print details to highlight generated text vs text that is supported by context. It also includes annotations of which context was used for each part of the response (where available).

julia
PT.pprint(details)

Example with advanced retrieval (question rephrasing and reranking; requires COHERE_API_KEY). We will obtain the top 100 chunks from embeddings (top_k) and the top 5 chunks from reranking (top_n). In addition, it will be done with a "custom" locally-hosted model.

julia
cfg = RAGConfig(; retriever = AdvancedRetriever())\n\n# kwargs will be big and nested, let's prepare them upfront\n# we specify "custom" model for each component that calls LLM\nkwargs = (\n    retriever_kwargs = (;\n        top_k = 100,\n        top_n = 5,\n        rephraser_kwargs = (;\n            model = "custom"),\n        embedder_kwargs = (;\n            model = "custom"),\n        tagger_kwargs = (;\n            model = "custom")),\n    generator_kwargs = (;\n        answerer_kwargs = (;\n            model = "custom"),\n        refiner_kwargs = (;\n            model = "custom")),\n    api_kwargs = (;\n        url = "http://localhost:8080"))\n\nresult = airag(cfg, index, question; kwargs...)

If you want to use hybrid retrieval (embeddings + BM25), you can easily create an additional index based on keywords and pass them both into a MultiIndex.

You need to provide an explicit config, so the pipeline knows how to handle each index in the search similarity phase (finder).

julia
index = # your existing index\n\n# create the multi-index with the keywords index\nindex_keywords = ChunkKeywordsIndex(index)\nmulti_index = MultiIndex([index, index_keywords])\n\n# define the similarity measures for the indices that you have (same order)\nfinder = RT.MultiFinder([RT.CosineSimilarity(), RT.BM25Similarity()])\ncfg = RAGConfig(; retriever=AdvancedRetriever(; processor=RT.KeywordsProcessor(), finder))\n\n# Run the pipeline with the new hybrid retrieval (return the `RAGResult` to see the details)\nresult = airag(cfg, multi_index; question, return_all=true)\n\n# Pretty-print the result\nPT.pprint(result)

For easier manipulation of nested kwargs, see utilities getpropertynested, setpropertynested, merge_kwargs_nested.

source


# PromptingTools.Experimental.RAGTools.retrieveFunction.
julia
retrieve(retriever::AbstractRetriever,\n    index::AbstractChunkIndex,\n    question::AbstractString;\n    verbose::Integer = 1,\n    top_k::Integer = 100,\n    top_n::Integer = 5,\n    api_kwargs::NamedTuple = NamedTuple(),\n    rephraser::AbstractRephraser = retriever.rephraser,\n    rephraser_kwargs::NamedTuple = NamedTuple(),\n    embedder::AbstractEmbedder = retriever.embedder,\n    embedder_kwargs::NamedTuple = NamedTuple(),\n    processor::AbstractProcessor = retriever.processor,\n    processor_kwargs::NamedTuple = NamedTuple(),\n    finder::AbstractSimilarityFinder = retriever.finder,\n    finder_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = retriever.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    filter::AbstractTagFilter = retriever.filter,\n    filter_kwargs::NamedTuple = NamedTuple(),\n    reranker::AbstractReranker = retriever.reranker,\n    reranker_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Retrieves the most relevant chunks from the index for the given question and returns them in the RAGResult object.

This is the main entry point for the retrieval stage of the RAG pipeline. It is often followed by the generate! step.

Notes:

The arguments correspond to the steps of the retrieval process (rephrasing, embedding, finding similar docs, tagging, filtering by tags, reranking). You can customize each step by providing a new custom type that dispatches the corresponding function, eg, create your own type struct MyReranker<:AbstractReranker end and define the custom method for it rerank(::MyReranker,...) = ....

Note: Discover available retrieval sub-types for each step with subtypes(AbstractRephraser) and similar for other abstract types.

If you're using locally-hosted models, you can pass the api_kwargs with the url field set to the model's URL and make sure to provide corresponding model kwargs to rephraser, embedder, and tagger to use the custom models (they make AI calls).

Arguments

See also: SimpleRetriever, AdvancedRetriever, build_index, rephrase, get_embeddings, get_keywords, find_closest, get_tags, find_tags, rerank, RAGResult.

Examples

Find the 5 most relevant chunks from the index for the given question.

julia
# assumes you have an existing index `index`\nretriever = SimpleRetriever()\n\nresult = retrieve(retriever,\n    index,\n    "What is the capital of France?",\n    top_n = 5)\n\n# or use the default retriever (same as above)\nresult = retrieve(index,\n    "What is the capital of France?",\n    top_n = 5)

Apply more advanced retrieval with question rephrasing and reranking (requires COHERE_API_KEY). We will obtain top 100 chunks from embeddings (top_k) and top 5 chunks from reranking (top_n).

julia
retriever = AdvancedRetriever()\n\nresult = retrieve(retriever, index, question; top_k=100, top_n=5)

You can use the retriever to customize your retrieval strategy or directly change the strategy types in the retrieve kwargs!

Example of using locally-hosted model hosted on localhost:8080:

julia
retriever = SimpleRetriever()\nresult = retrieve(retriever, index, question;\n    rephraser_kwargs = (; model = "custom"),\n    embedder_kwargs = (; model = "custom"),\n    tagger_kwargs = (; model = "custom"), api_kwargs = (;\n        url = "http://localhost:8080"))

source


# PromptingTools.Experimental.RAGTools.generate!Function.
julia
generate!(\n    generator::AbstractGenerator, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    verbose::Integer = 1,\n    api_kwargs::NamedTuple = NamedTuple(),\n    contexter::AbstractContextBuilder = generator.contexter,\n    contexter_kwargs::NamedTuple = NamedTuple(),\n    answerer::AbstractAnswerer = generator.answerer,\n    answerer_kwargs::NamedTuple = NamedTuple(),\n    refiner::AbstractRefiner = generator.refiner,\n    refiner_kwargs::NamedTuple = NamedTuple(),\n    postprocessor::AbstractPostprocessor = generator.postprocessor,\n    postprocessor_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Generate the response using the provided generator, index, and result. It is the second step in the RAG pipeline (after retrieve).

Returns the mutated result with the result.final_answer and the full conversation saved in result.conversations[:final_answer].

Notes

Arguments

See also: retrieve, build_context!, ContextEnumerator, answer!, SimpleAnswerer, refine!, NoRefiner, SimpleRefiner, postprocess!, NoPostprocessor

Examples

julia
# Assume we already have `index`\n\nquestion = "What are the best practices for parallel computing in Julia?"\n\n# Retrieve the relevant chunks - returns RAGResult\nresult = retrieve(index, question)\n\n# Generate the answer using the default generator, mutates the same result\nresult = generate!(index, result)

source


# PromptingTools.Experimental.RAGTools.annotate_supportFunction.
julia
annotate_support(annotater::TrigramAnnotater, answer::AbstractString,\n    context::AbstractVector; min_score::Float64 = 0.5,\n    skip_trigrams::Bool = true, hashed::Bool = true,\n    sources::Union{Nothing, AbstractVector{<:AbstractString}} = nothing,\n    min_source_score::Float64 = 0.25,\n    add_sources::Bool = true,\n    add_scores::Bool = true, kwargs...)

Annotates the answer with the overlap/what's supported in context and returns the annotated tree of nodes representing the answer

Returns a "root" node with children nodes representing the sentences/code blocks in the answer. Only the "leaf" nodes are to be printed (to avoid duplication), "leaf" nodes are those with NO children.

Default logic:

Arguments

Example

julia
annotater = TrigramAnnotater()\ncontext = [\n    "This is a test context.", "Another context sentence.", "Final piece of context."]\nanswer = "This is a test context. Another context sentence."\n\nannotated_root = annotate_support(annotater, answer, context)\npprint(annotated_root) # pretty print the annotated tree

source

julia
annotate_support(\n    annotater::TrigramAnnotater, result::AbstractRAGResult; min_score::Float64 = 0.5,\n    skip_trigrams::Bool = true, hashed::Bool = true,\n    min_source_score::Float64 = 0.25,\n    add_sources::Bool = true,\n    add_scores::Bool = true, kwargs...)

Dispatch for annotate_support for AbstractRAGResult type. It extracts the final_answer and context from the result and calls annotate_support with them.

See annotate_support for more details.

Example

julia
res = RAGResult(; question = "", final_answer = "This is a test.",\n    context = ["Test context.", "Completely different"])\nannotated_root = annotate_support(annotater, res)\nPT.pprint(annotated_root)

source


# PromptingTools.Experimental.RAGTools.build_qa_evalsFunction.
julia
build_qa_evals(doc_chunks::Vector{<:AbstractString}, sources::Vector{<:AbstractString};\n               model=PT.MODEL_CHAT, instructions="None.", qa_template::Symbol=:RAGCreateQAFromContext, \n               verbose::Bool=true, api_kwargs::NamedTuple = NamedTuple(), kwargs...) -> Vector{QAEvalItem}

Create a collection of question and answer evaluations (QAEvalItem) from document chunks and sources. This function generates Q&A pairs based on the provided document chunks, using a specified AI model and template.

Arguments

Returns

Vector{QAEvalItem}: A vector of QAEvalItem structs, each containing a source, context, question, and answer. Invalid or empty items are filtered out.

Notes

Examples

Creating Q&A evaluations from a set of document chunks:

julia
doc_chunks = ["Text from document 1", "Text from document 2"]\nsources = ["source1", "source2"]\nqa_evals = build_qa_evals(doc_chunks, sources)

source


', 88) + createStaticVNode('

RAG Tools Introduction

RAGTools is an experimental module that provides a set of utilities for building Retrieval-Augmented Generation (RAG) applications, ie, applications that generate answers by combining knowledge of the underlying AI model with the information from the user's knowledge base.

It is designed to be powerful and flexible, allowing you to build RAG applications with minimal effort. Extend any step of the pipeline with your own custom code (see the RAG Interface section), or use the provided defaults to get started quickly.

Once the API stabilizes (near term), we hope to carve it out into a separate package.

Import the module as follows:

julia
# required dependencies to load the necessary extensions!!!\nusing LinearAlgebra, SparseArrays, Unicode, Snowball\nusing PromptingTools.Experimental.RAGTools\n# to access unexported functionality\nconst RT = PromptingTools.Experimental.RAGTools

Highlights

The main functions to be aware of are:

The hope is to provide a modular and easily extensible set of tools for building RAG applications in Julia. Feel free to open an issue or ask in the #generative-ai channel in the JuliaLang Slack if you have a specific need.

Examples

Let's build an index; first, we need to provide a starter list of documents:

julia
sentences = [\n    "Find the most comprehensive guide on Julia programming language for beginners published in 2023.",\n    "Search for the latest advancements in quantum computing using Julia language.",\n    "How to implement machine learning algorithms in Julia with examples.",\n    "Looking for performance comparison between Julia, Python, and R for data analysis.",\n    "Find Julia language tutorials focusing on high-performance scientific computing.",\n    "Search for the top Julia language packages for data visualization and their documentation.",\n    "How to set up a Julia development environment on Windows 10.",\n    "Discover the best practices for parallel computing in Julia.",\n    "Search for case studies of large-scale data processing using Julia.",\n    "Find comprehensive resources for mastering metaprogramming in Julia.",\n    "Looking for articles on the advantages of using Julia for statistical modeling.",\n    "How to contribute to the Julia open-source community: A step-by-step guide.",\n    "Find the comparison of numerical accuracy between Julia and MATLAB.",\n    "Looking for the latest Julia language updates and their impact on AI research.",\n    "How to efficiently handle big data with Julia: Techniques and libraries.",\n    "Discover how Julia integrates with other programming languages and tools.",\n    "Search for Julia-based frameworks for developing web applications.",\n    "Find tutorials on creating interactive dashboards with Julia.",\n    "How to use Julia for natural language processing and text analysis.",\n    "Discover the role of Julia in the future of computational finance and econometrics."\n]

Let's index these "documents":

julia
index = build_index(sentences; chunker_kwargs=(; sources=map(i -> "Doc$i", 1:length(sentences))))

This would be equivalent to index = build_index(SimpleIndexer(), sentences), which dispatches to the default implementation of each step via the SimpleIndexer struct. We provide these default implementations for the main functions as an optional argument - there is no need to provide them if you're running the default pipeline.
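
For clarity, here is a sketch of that equivalent explicit call (reusing the sentences and sources from above):

julia
indexer = SimpleIndexer()  # bundles the default chunker, embedder, and tagger\nindex = build_index(indexer, sentences;\n    chunker_kwargs = (; sources = map(i -> "Doc$i", 1:length(sentences))))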

Notice that we have provided a chunker_kwargs argument to the build_index function. These are the kwargs passed to the chunker step.

Now let's generate an answer to a question.

  1. Run end-to-end RAG (retrieve + generate!), return AIMessage
julia
question = "What are the best practices for parallel computing in Julia?"\n\nmsg = airag(index; question) # short for airag(RAGConfig(), index; question)\n## Output:\n## [ Info: Done with RAG. Total cost: \\$0.0\n## AIMessage("Some best practices for parallel computing in Julia include us...
  2. Explore what's happening under the hood by changing the return type - RAGResult contains all intermediate steps.
julia
result = airag(index; question, return_all=true)\n## RAGResult\n##   question: String "What are the best practices for parallel computing in Julia?"\n##   rephrased_questions: Array{String}((1,))\n##   answer: SubString{String}\n##   final_answer: SubString{String}\n##   context: Array{String}((5,))\n##   sources: Array{String}((5,))\n##   emb_candidates: CandidateChunks{Int64, Float32}\n##   tag_candidates: CandidateChunks{Int64, Float32}\n##   filtered_candidates: CandidateChunks{Int64, Float32}\n##   reranked_candidates: CandidateChunks{Int64, Float32}\n##   conversations: Dict{Symbol, Vector{<:PromptingTools.AbstractMessage}}

You can still get the message from the result; see result.conversations[:final_answer] (the dictionary keys correspond to the function names of those steps).
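
For example, a small sketch of pulling that message back out of the result object from above (assuming the final message is the last one in that conversation):

julia
conv = result.conversations[:final_answer]  # full conversation of the answer step\nmsg = last(conv)  # the AIMessage you would otherwise get from `airag` directly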

  3. If you need to customize it, break the pipeline into its sub-steps: retrieve and generate! - RAGResult serves as the intermediate result.
julia
# Retrieve which chunks are relevant to the question\nresult = retrieve(index, question)\n# Generate an answer\nresult = generate!(index, result)

You can leverage the pretty-printing system with pprint, which automatically annotates how well the answer is supported by the chunks we provided to the model. It is configurable, and you can select only some of its features (eg, scores, sources).

julia
pprint(result)

You'll see the following in the REPL, but with color highlighting in the terminal.

plaintext
--------------------\nQUESTION(s)\n--------------------\n- What are the best practices for parallel computing in Julia?\n\n--------------------\nANSWER\n--------------------\nSome of the best practices for parallel computing in Julia include:[1,0.7]\n- Using [3,0.4]`@threads` for simple parallelism[1,0.34]\n- Utilizing `Distributed` module for more complex parallel tasks[1,0.19]\n- Avoiding excessive memory allocation\n- Considering task granularity for efficient workload distribution\n\n--------------------\nSOURCES\n--------------------\n1. Doc8\n2. Doc15\n3. Doc5\n4. Doc2\n5. Doc9

See ?print_html for the HTML version of the pretty-printing and styling system, eg, when you want to display the results in a web application based on Genie.jl/Stipple.jl.

How to read the output

Want more?

See examples/building_RAG.jl for one more example.

RAG Interface

System Overview

This system is designed for information retrieval and response generation, structured in three main phases:

The corresponding functions are build_index, retrieve, and generate!, respectively. Here is the high-level diagram that shows the signature of the main functions:

Notice that the first argument is a custom type for multiple dispatch. In addition, observe the "kwargs" names: that's how the keyword arguments for each function are passed down from the higher-level functions (eg, build_index(...; chunker_kwargs=(; separators=...))). It's the simplest way to customize a step of the pipeline (eg, set a custom model with the model kwarg or a prompt template with the template kwarg).

The system is designed to be hackable and extensible at almost every entry point. If you want to customize the behavior of any step, you can do so by defining a new type and defining a new method for the step you're changing, eg,

julia
using PromptingTools.Experimental.RAGTools: rerank, AbstractReranker\n\nstruct MyReranker <: AbstractReranker end\nrerank(::MyReranker, index, candidates) = ...

And then you would set the retrieve step to use your custom MyReranker via the reranker kwarg, eg, retrieve(...; reranker = MyReranker()) (or customize the main dispatching AbstractRetriever struct).
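
To make that concrete, here is a minimal, hypothetical pass-through reranker; the kwargs... catch-all is an assumption so that any keyword arguments forwarded by the retriever are accepted:

julia
# assumes RAGTools is loaded and aliased as `RT` (see the import snippet above)\nstruct MyReranker <: RT.AbstractReranker end\n# no-op: return the candidates exactly as they were passed in\nRT.rerank(::MyReranker, index, candidates; kwargs...) = candidates\n\n# result = retrieve(RT.SimpleRetriever(), index, question; reranker = MyReranker())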

The overarching principles are:

RAG Diagram

The main functions are:

Prepare your document index with build_index:

Run E2E RAG with airag:

Retrieve relevant chunks with retrieve:

Generate an answer from relevant chunks with generate!:

To discover the currently available implementations, use the subtypes function, eg, subtypes(AbstractReranker).
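
For example (assuming the RT alias from the import snippet above; the exact list depends on the version you have installed):

julia
using InteractiveUtils: subtypes  # already available in the REPL\nsubtypes(RT.AbstractReranker)\nsubtypes(RT.AbstractRephraser)\nsubtypes(RT.AbstractRetriever)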

Passing Keyword Arguments

If you need to pass keyword arguments, use the nested kwargs corresponding to the dispatch type names (eg, the rephrase step has the rephraser dispatch type and rephraser_kwargs for its keyword arguments).

For example:

julia
cfg = RAGConfig(; retriever = AdvancedRetriever())\n\n# kwargs will be big and nested, let's prepare them upfront\n# we specify "custom" model for each component that calls LLM\nkwargs = (\n    retriever = AdvancedRetriever(),\n    retriever_kwargs = (;\n        top_k = 100,\n        top_n = 5,\n        # notice that this is effectively: retriever_kwargs/rephraser_kwargs/template\n        rephraser_kwargs = (;\n            template = :RAGQueryHyDE,\n            model = "custom")),\n    generator_kwargs = (;\n        # pass kwargs to `answer!` step defined by the `answerer` -> we're setting `answerer_kwargs`\n        answerer_kwargs = (;\n            model = "custom"),\n    # api_kwargs can be shared across all components\n    api_kwargs = (;\n        url = "http://localhost:8080")))\n\nresult = airag(cfg, index, question; kwargs...)

If you were one level deeper in the pipeline, working with retriever directly, you would pass:

julia
retriever_kwargs = (;\n    top_k = 100,\n    top_n = 5,\n    # notice that this is effectively: rephraser_kwargs/template\n    rephraser_kwargs = (;\n      template = :RAGQueryHyDE,\n      model = "custom"),\n  # api_kwargs can be shared across all components\n  api_kwargs = (;\n      url = "http://localhost:8080"))\n\nresult = retrieve(AdvancedRetriever(), index, question; retriever_kwargs...)

And going even deeper, you would provide the rephraser_kwargs directly to the rephrase step, eg,

julia
rephrase(SimpleRephraser(), question; model="custom", template = :RAGQueryHyDE, api_kwargs = (; url = "http://localhost:8080"))

Deepdive

Preparation Phase:

Retrieval Phase:

Generation Phase:

Note that all generation steps mutate the RAGResult object.

See more details and corresponding functions and types in src/Experimental/RAGTools/rag_interface.jl.

References

# PromptingTools.Experimental.RAGTools.build_indexFunction.
julia
build_index(\n    indexer::AbstractIndexBuilder, files_or_docs::Vector{<:AbstractString};\n    verbose::Integer = 1,\n    extras::Union{Nothing, AbstractVector} = nothing,\n    index_id = gensym("ChunkEmbeddingsIndex"),\n    chunker::AbstractChunker = indexer.chunker,\n    chunker_kwargs::NamedTuple = NamedTuple(),\n    embedder::AbstractEmbedder = indexer.embedder,\n    embedder_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = indexer.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    api_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Build an INDEX for RAG (Retrieval-Augmented Generation) applications from the provided file paths. INDEX is an object storing the document chunks and their embeddings (and potentially other information).

The function processes each file or document (depending on chunker), splits its content into chunks, embeds these chunks, optionally extracts metadata, and then combines this information into a retrievable index.

Define your own methods via indexer and its subcomponents (chunker, embedder, tagger).

Arguments

Returns

See also: ChunkEmbeddingsIndex, get_chunks, get_embeddings, get_tags, CandidateChunks, find_closest, find_tags, rerank, retrieve, generate!, airag

Examples

julia
# Default is loading a vector of strings and chunking them (`TextChunker()`)\nindex = build_index(SimpleIndexer(), texts; chunker_kwargs = (; max_length=10))\n\n# Another example with tags extraction, splitting only sentences and verbose output\n# Assuming `test_files` is a vector of file paths\nindexer = SimpleIndexer(chunker=FileChunker(), tagger=OpenTagger())\nindex = build_index(indexer, test_files; \n        chunker_kwargs = (; separators=[". "]), verbose=true)

Notes

source

julia
build_index(\n    indexer::KeywordsIndexer, files_or_docs::Vector{<:AbstractString};\n    verbose::Integer = 1,\n    extras::Union{Nothing, AbstractVector} = nothing,\n    index_id = gensym("ChunkKeywordsIndex"),\n    chunker::AbstractChunker = indexer.chunker,\n    chunker_kwargs::NamedTuple = NamedTuple(),\n    processor::AbstractProcessor = indexer.processor,\n    processor_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = indexer.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    api_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Builds a ChunkKeywordsIndex from the provided files or documents to support keyword-based search (BM25).

source


# PromptingTools.Experimental.RAGTools.airagFunction.
julia
airag(cfg::AbstractRAGConfig, index::AbstractDocumentIndex;\n    question::AbstractString,\n    verbose::Integer = 1, return_all::Bool = false,\n    api_kwargs::NamedTuple = NamedTuple(),\n    retriever::AbstractRetriever = cfg.retriever,\n    retriever_kwargs::NamedTuple = NamedTuple(),\n    generator::AbstractGenerator = cfg.generator,\n    generator_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

High-level wrapper for Retrieval-Augmented Generation (RAG). It combines the retrieve and generate! steps, which you can customize if needed.

The simplest version first finds the relevant chunks in index for the question and then sends these chunks to the AI model to help with generating a response to the question.

To customize the components, replace the types (retriever, generator) of the corresponding step of the RAG pipeline - or go into sub-routines within the steps. Eg, use subtypes(AbstractRetriever) to find the available options.

Arguments

Returns

See also build_index, retrieve, generate!, RAGResult, getpropertynested, setpropertynested, merge_kwargs_nested, ChunkKeywordsIndex.

Examples

Using airag to get a response for a question:

julia
index = build_index(...)  # create an index\nquestion = "How to make a barplot in Makie.jl?"\nmsg = airag(index; question)

To understand the details of the RAG process, use return_all=true

julia
msg, details = airag(index; question, return_all = true)\n# details is a RAGDetails object with all the internal steps of the `airag` function

You can also pretty-print details to highlight generated text vs text that is supported by context. It also includes annotations of which context was used for each part of the response (where available).

julia
PT.pprint(details)

Example with advanced retrieval (question rephrasing and reranking; requires COHERE_API_KEY). We will obtain the top 100 chunks from embeddings (top_k) and the top 5 chunks from reranking (top_n). In addition, it will be done with a "custom" locally-hosted model.

julia
cfg = RAGConfig(; retriever = AdvancedRetriever())\n\n# kwargs will be big and nested, let's prepare them upfront\n# we specify "custom" model for each component that calls LLM\nkwargs = (\n    retriever_kwargs = (;\n        top_k = 100,\n        top_n = 5,\n        rephraser_kwargs = (;\n            model = "custom"),\n        embedder_kwargs = (;\n            model = "custom"),\n        tagger_kwargs = (;\n            model = "custom")),\n    generator_kwargs = (;\n        answerer_kwargs = (;\n            model = "custom"),\n        refiner_kwargs = (;\n            model = "custom")),\n    api_kwargs = (;\n        url = "http://localhost:8080"))\n\nresult = airag(cfg, index, question; kwargs...)

If you want to use hybrid retrieval (embeddings + BM25), you can easily create an additional index based on keywords and pass them both into a MultiIndex.

You need to provide an explicit config, so the pipeline knows how to handle each index in the search similarity phase (finder).

julia
index = # your existing index\n\n# create the multi-index with the keywords index\nindex_keywords = ChunkKeywordsIndex(index)\nmulti_index = MultiIndex([index, index_keywords])\n\n# define the similarity measures for the indices that you have (same order)\nfinder = RT.MultiFinder([RT.CosineSimilarity(), RT.BM25Similarity()])\ncfg = RAGConfig(; retriever=AdvancedRetriever(; processor=RT.KeywordsProcessor(), finder))\n\n# Run the pipeline with the new hybrid retrieval (return the `RAGResult` to see the details)\nresult = airag(cfg, multi_index; question, return_all=true)\n\n# Pretty-print the result\nPT.pprint(result)

For easier manipulation of nested kwargs, see utilities getpropertynested, setpropertynested, merge_kwargs_nested.

source


# PromptingTools.Experimental.RAGTools.retrieveFunction.
julia
retrieve(retriever::AbstractRetriever,\n    index::AbstractChunkIndex,\n    question::AbstractString;\n    verbose::Integer = 1,\n    top_k::Integer = 100,\n    top_n::Integer = 5,\n    api_kwargs::NamedTuple = NamedTuple(),\n    rephraser::AbstractRephraser = retriever.rephraser,\n    rephraser_kwargs::NamedTuple = NamedTuple(),\n    embedder::AbstractEmbedder = retriever.embedder,\n    embedder_kwargs::NamedTuple = NamedTuple(),\n    processor::AbstractProcessor = retriever.processor,\n    processor_kwargs::NamedTuple = NamedTuple(),\n    finder::AbstractSimilarityFinder = retriever.finder,\n    finder_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = retriever.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    filter::AbstractTagFilter = retriever.filter,\n    filter_kwargs::NamedTuple = NamedTuple(),\n    reranker::AbstractReranker = retriever.reranker,\n    reranker_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Retrieves the most relevant chunks from the index for the given question and returns them in the RAGResult object.

This is the main entry point for the retrieval stage of the RAG pipeline. It is often followed by the generate! step.

Notes:

The arguments correspond to the steps of the retrieval process (rephrasing, embedding, finding similar docs, tagging, filtering by tags, reranking). You can customize each step by providing a new custom type that dispatches the corresponding function, eg, create your own type struct MyReranker<:AbstractReranker end and define the custom method for it rerank(::MyReranker,...) = ....

Note: Discover available retrieval sub-types for each step with subtypes(AbstractRephraser) and similar for other abstract types.

If you're using locally-hosted models, you can pass the api_kwargs with the url field set to the model's URL and make sure to provide corresponding model kwargs to rephraser, embedder, and tagger to use the custom models (they make AI calls).

Arguments

See also: SimpleRetriever, AdvancedRetriever, build_index, rephrase, get_embeddings, get_keywords, find_closest, get_tags, find_tags, rerank, RAGResult.

Examples

Find the 5 most relevant chunks from the index for the given question.

julia
# assumes you have an existing index `index`\nretriever = SimpleRetriever()\n\nresult = retrieve(retriever,\n    index,\n    "What is the capital of France?",\n    top_n = 5)\n\n# or use the default retriever (same as above)\nresult = retrieve(index,\n    "What is the capital of France?",\n    top_n = 5)

Apply more advanced retrieval with question rephrasing and reranking (requires COHERE_API_KEY). We will obtain top 100 chunks from embeddings (top_k) and top 5 chunks from reranking (top_n).

julia
retriever = AdvancedRetriever()\n\nresult = retrieve(retriever, index, question; top_k=100, top_n=5)

You can use the retriever to customize your retrieval strategy or directly change the strategy types in the retrieve kwargs!

Example of using locally-hosted model hosted on localhost:8080:

julia
retriever = SimpleRetriever()\nresult = retrieve(retriever, index, question;\n    rephraser_kwargs = (; model = "custom"),\n    embedder_kwargs = (; model = "custom"),\n    tagger_kwargs = (; model = "custom"), api_kwargs = (;\n        url = "http://localhost:8080"))

source


# PromptingTools.Experimental.RAGTools.generate!Function.
julia
generate!(\n    generator::AbstractGenerator, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    verbose::Integer = 1,\n    api_kwargs::NamedTuple = NamedTuple(),\n    contexter::AbstractContextBuilder = generator.contexter,\n    contexter_kwargs::NamedTuple = NamedTuple(),\n    answerer::AbstractAnswerer = generator.answerer,\n    answerer_kwargs::NamedTuple = NamedTuple(),\n    refiner::AbstractRefiner = generator.refiner,\n    refiner_kwargs::NamedTuple = NamedTuple(),\n    postprocessor::AbstractPostprocessor = generator.postprocessor,\n    postprocessor_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Generate the response using the provided generator, index, and result. It is the second step in the RAG pipeline (after retrieve).

Returns the mutated result with the result.final_answer and the full conversation saved in result.conversations[:final_answer].

Notes

Arguments

See also: retrieve, build_context!, ContextEnumerator, answer!, SimpleAnswerer, refine!, NoRefiner, SimpleRefiner, postprocess!, NoPostprocessor

Examples

julia
# Assume we already have `index`\n\nquestion = "What are the best practices for parallel computing in Julia?"\n\n# Retrieve the relevant chunks - returns RAGResult\nresult = retrieve(index, question)\n\n# Generate the answer using the default generator, mutates the same result\nresult = generate!(index, result)

source


# PromptingTools.Experimental.RAGTools.annotate_supportFunction.
julia
annotate_support(annotater::TrigramAnnotater, answer::AbstractString,\n    context::AbstractVector; min_score::Float64 = 0.5,\n    skip_trigrams::Bool = true, hashed::Bool = true,\n    sources::Union{Nothing, AbstractVector{<:AbstractString}} = nothing,\n    min_source_score::Float64 = 0.25,\n    add_sources::Bool = true,\n    add_scores::Bool = true, kwargs...)

Annotates the answer with the overlap/what's supported in context and returns the annotated tree of nodes representing the answer

Returns a "root" node with children nodes representing the sentences/code blocks in the answer. Only the "leaf" nodes are to be printed (to avoid duplication), "leaf" nodes are those with NO children.

Default logic:

Arguments

Example

julia
annotater = TrigramAnnotater()\ncontext = [\n    "This is a test context.", "Another context sentence.", "Final piece of context."]\nanswer = "This is a test context. Another context sentence."\n\nannotated_root = annotate_support(annotater, answer, context)\npprint(annotated_root) # pretty print the annotated tree

source

julia
annotate_support(\n    annotater::TrigramAnnotater, result::AbstractRAGResult; min_score::Float64 = 0.5,\n    skip_trigrams::Bool = true, hashed::Bool = true,\n    min_source_score::Float64 = 0.25,\n    add_sources::Bool = true,\n    add_scores::Bool = true, kwargs...)

Dispatch for annotate_support for AbstractRAGResult type. It extracts the final_answer and context from the result and calls annotate_support with them.

See annotate_support for more details.

Example

julia
res = RAGResult(; question = "", final_answer = "This is a test.",\n    context = ["Test context.", "Completely different"])\nannotated_root = annotate_support(annotater, res)\nPT.pprint(annotated_root)

source


# PromptingTools.Experimental.RAGTools.build_qa_evalsFunction.
julia
build_qa_evals(doc_chunks::Vector{<:AbstractString}, sources::Vector{<:AbstractString};\n               model=PT.MODEL_CHAT, instructions="None.", qa_template::Symbol=:RAGCreateQAFromContext, \n               verbose::Bool=true, api_kwargs::NamedTuple = NamedTuple(), kwargs...) -> Vector{QAEvalItem}

Create a collection of question and answer evaluations (QAEvalItem) from document chunks and sources. This function generates Q&A pairs based on the provided document chunks, using a specified AI model and template.

Arguments

Returns

Vector{QAEvalItem}: A vector of QAEvalItem structs, each containing a source, context, question, and answer. Invalid or empty items are filtered out.

Notes

Examples

Creating Q&A evaluations from a set of document chunks:

julia
doc_chunks = ["Text from document 1", "Text from document 2"]\nsources = ["source1", "source2"]\nqa_evals = build_qa_evals(doc_chunks, sources)

source


', 88) ])); } const rag_tools_intro = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render]]); diff --git a/previews/PR218/assets/extra_tools_rag_tools_intro.md.CJjL2hwq.js b/previews/PR218/assets/extra_tools_rag_tools_intro.md.oi28ZdI4.lean.js similarity index 99% rename from previews/PR218/assets/extra_tools_rag_tools_intro.md.CJjL2hwq.js rename to previews/PR218/assets/extra_tools_rag_tools_intro.md.oi28ZdI4.lean.js index f1dfa7877..b778ea87d 100644 --- a/previews/PR218/assets/extra_tools_rag_tools_intro.md.CJjL2hwq.js +++ b/previews/PR218/assets/extra_tools_rag_tools_intro.md.oi28ZdI4.lean.js @@ -5,7 +5,7 @@ const __pageData = JSON.parse('{"title":"RAG Tools Introduction","description":" const _sfc_main = { name: "extra_tools/rag_tools_intro.md" }; function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { return openBlock(), createElementBlock("div", null, _cache[0] || (_cache[0] = [ - createStaticVNode('

RAG Tools Introduction

RAGTools is an experimental module that provides a set of utilities for building Retrieval-Augmented Generation (RAG) applications, ie, applications that generate answers by combining knowledge of the underlying AI model with the information from the user's knowledge base.

It is designed to be powerful and flexible, allowing you to build RAG applications with minimal effort. Extend any step of the pipeline with your own custom code (see the RAG Interface section), or use the provided defaults to get started quickly.

Once the API stabilizes (near term), we hope to carve it out into a separate package.

Import the module as follows:

julia
# required dependencies to load the necessary extensions!!!\nusing LinearAlgebra, SparseArrays, Unicode, Snowball\nusing PromptingTools.Experimental.RAGTools\n# to access unexported functionality\nconst RT = PromptingTools.Experimental.RAGTools

Highlights

The main functions to be aware of are:

The hope is to provide a modular and easily extensible set of tools for building RAG applications in Julia. Feel free to open an issue or ask in the #generative-ai channel in the JuliaLang Slack if you have a specific need.

Examples

Let's build an index; first, we need to provide a starter list of documents:

julia
sentences = [\n    "Find the most comprehensive guide on Julia programming language for beginners published in 2023.",\n    "Search for the latest advancements in quantum computing using Julia language.",\n    "How to implement machine learning algorithms in Julia with examples.",\n    "Looking for performance comparison between Julia, Python, and R for data analysis.",\n    "Find Julia language tutorials focusing on high-performance scientific computing.",\n    "Search for the top Julia language packages for data visualization and their documentation.",\n    "How to set up a Julia development environment on Windows 10.",\n    "Discover the best practices for parallel computing in Julia.",\n    "Search for case studies of large-scale data processing using Julia.",\n    "Find comprehensive resources for mastering metaprogramming in Julia.",\n    "Looking for articles on the advantages of using Julia for statistical modeling.",\n    "How to contribute to the Julia open-source community: A step-by-step guide.",\n    "Find the comparison of numerical accuracy between Julia and MATLAB.",\n    "Looking for the latest Julia language updates and their impact on AI research.",\n    "How to efficiently handle big data with Julia: Techniques and libraries.",\n    "Discover how Julia integrates with other programming languages and tools.",\n    "Search for Julia-based frameworks for developing web applications.",\n    "Find tutorials on creating interactive dashboards with Julia.",\n    "How to use Julia for natural language processing and text analysis.",\n    "Discover the role of Julia in the future of computational finance and econometrics."\n]

Let's index these "documents":

julia
index = build_index(sentences; chunker_kwargs=(; sources=map(i -> "Doc$i", 1:length(sentences))))

This would be equivalent to index = build_index(SimpleIndexer(), sentences), which dispatches to the default implementation of each step via the SimpleIndexer struct. We provide these default implementations for the main functions as an optional argument - there is no need to provide them if you're running the default pipeline.
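
For clarity, here is a sketch of that equivalent explicit call (reusing the sentences and sources from above):

julia
indexer = SimpleIndexer()  # bundles the default chunker, embedder, and tagger\nindex = build_index(indexer, sentences;\n    chunker_kwargs = (; sources = map(i -> "Doc$i", 1:length(sentences))))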

Notice that we have provided a chunker_kwargs argument to the build_index function. These are the kwargs passed to the chunker step.

Now let's generate an answer to a question.

  1. Run end-to-end RAG (retrieve + generate!), return AIMessage
julia
question = "What are the best practices for parallel computing in Julia?"\n\nmsg = airag(index; question) # short for airag(RAGConfig(), index; question)\n## Output:\n## [ Info: Done with RAG. Total cost: \\$0.0\n## AIMessage("Some best practices for parallel computing in Julia include us...
  2. Explore what's happening under the hood by changing the return type - RAGResult contains all intermediate steps.
julia
result = airag(index; question, return_all=true)\n## RAGResult\n##   question: String "What are the best practices for parallel computing in Julia?"\n##   rephrased_questions: Array{String}((1,))\n##   answer: SubString{String}\n##   final_answer: SubString{String}\n##   context: Array{String}((5,))\n##   sources: Array{String}((5,))\n##   emb_candidates: CandidateChunks{Int64, Float32}\n##   tag_candidates: CandidateChunks{Int64, Float32}\n##   filtered_candidates: CandidateChunks{Int64, Float32}\n##   reranked_candidates: CandidateChunks{Int64, Float32}\n##   conversations: Dict{Symbol, Vector{<:PromptingTools.AbstractMessage}}

You can still get the message from the result; see result.conversations[:final_answer] (the dictionary keys correspond to the function names of those steps).
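
For example, a small sketch of pulling that message back out of the result object from above (assuming the final message is the last one in that conversation):

julia
conv = result.conversations[:final_answer]  # full conversation of the answer step\nmsg = last(conv)  # the AIMessage you would otherwise get from `airag` directly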

  3. If you need to customize it, break the pipeline into its sub-steps: retrieve and generate! - RAGResult serves as the intermediate result.
julia
# Retrieve which chunks are relevant to the question\nresult = retrieve(index, question)\n# Generate an answer\nresult = generate!(index, result)

You can leverage the pretty-printing system with pprint, which automatically annotates how well the answer is supported by the chunks we provided to the model. It is configurable, and you can select only some of its features (eg, scores, sources).

julia
pprint(result)

You'll see the following in the REPL, but with color highlighting in the terminal.

plaintext
--------------------\nQUESTION(s)\n--------------------\n- What are the best practices for parallel computing in Julia?\n\n--------------------\nANSWER\n--------------------\nSome of the best practices for parallel computing in Julia include:[1,0.7]\n- Using [3,0.4]`@threads` for simple parallelism[1,0.34]\n- Utilizing `Distributed` module for more complex parallel tasks[1,0.19]\n- Avoiding excessive memory allocation\n- Considering task granularity for efficient workload distribution\n\n--------------------\nSOURCES\n--------------------\n1. Doc8\n2. Doc15\n3. Doc5\n4. Doc2\n5. Doc9

See ?print_html for the HTML version of the pretty-printing and styling system, eg, when you want to display the results in a web application based on Genie.jl/Stipple.jl.

How to read the output

Want more?

See examples/building_RAG.jl for one more example.

RAG Interface

System Overview

This system is designed for information retrieval and response generation, structured in three main phases:

The corresponding functions are build_index, retrieve, and generate!, respectively. Here is the high-level diagram that shows the signature of the main functions:

Notice that the first argument is a custom type for multiple dispatch. In addition, observe the "kwargs" names: that's how the keyword arguments for each function are passed down from the higher-level functions (eg, build_index(...; chunker_kwargs=(; separators=...))). It's the simplest way to customize a step of the pipeline (eg, set a custom model with the model kwarg or a prompt template with the template kwarg).

The system is designed to be hackable and extensible at almost every entry point. If you want to customize the behavior of any step, you can do so by defining a new type and defining a new method for the step you're changing, eg,

julia
using PromptingTools.Experimental.RAGTools: rerank\n\nstruct MyReranker <: AbstractReranker end\nrerank(::MyReranker, index, candidates) = ...

And then you would set the retrieve step to use your custom MyReranker via the reranker kwarg, eg, retrieve(....; reranker = MyReranker()) (or customize the main dispatching AbstractRetriever struct).

The overarching principles are:

RAG Diagram

The main functions are:

Prepare your document index with build_index:

Run E2E RAG with airag:

Retrieve relevant chunks with retrieve:

Generate an answer from relevant chunks with generate!:

To discover the currently available implementations, use the subtypes function, eg, subtypes(AbstractReranker).
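For instance, a minimal sketch of listing the available rerankers (the exact list depends on your installed version; subtypes lives in InteractiveUtils outside the REPL):

julia
using InteractiveUtils: subtypes\nusing PromptingTools.Experimental.RAGTools\nsubtypes(PromptingTools.Experimental.RAGTools.AbstractReranker)  # lists the available reranker implementations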

Passing Keyword Arguments

If you need to pass keyword arguments, use the nested kwargs corresponding to the dispatch type names (eg, the rephrase step has a rephraser dispatch type and rephraser_kwargs for its keyword arguments).

For example:

julia
cfg = RAGConfig(; retriever = AdvancedRetriever())\n\n# kwargs will be big and nested, let's prepare them upfront\n# we specify "custom" model for each component that calls LLM\nkwargs = (\n    retriever = AdvancedRetriever(),\n    retriever_kwargs = (;\n        top_k = 100,\n        top_n = 5,\n        # notice that this is effectively: retriever_kwargs/rephraser_kwargs/template\n        rephraser_kwargs = (;\n            template = :RAGQueryHyDE,\n            model = "custom")),\n    generator_kwargs = (;\n        # pass kwargs to `answer!` step defined by the `answerer` -> we're setting `answerer_kwargs`\n        answerer_kwargs = (;\n            model = "custom")),\n    # api_kwargs can be shared across all components\n    api_kwargs = (;\n        url = "http://localhost:8080"))\n\nresult = airag(cfg, index, question; kwargs...)

If you were one level deeper in the pipeline, working with retriever directly, you would pass:

julia
retriever_kwargs = (;\n    top_k = 100,\n    top_n = 5,\n    # notice that this is effectively: rephraser_kwargs/template\n    rephraser_kwargs = (;\n      template = :RAGQueryHyDE,\n      model = "custom"),\n  # api_kwargs can be shared across all components\n  api_kwargs = (;\n      url = "http://localhost:8080"))\n\nresult = retrieve(AdvancedRetriever(), index, question; retriever_kwargs...)

And going even deeper, you would provide the rephraser_kwargs directly to the rephrase step, eg,

julia
rephrase(SimpleRephraser(), question; model="custom", template = :RAGQueryHyDE, api_kwargs = (; url = "http://localhost:8080"))

Deepdive

Preparation Phase:

Retrieval Phase:

Generation Phase:

Note that all generation steps mutate the RAGResult object.
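To illustrate the in-place behavior, a small sketch (reusing index and question from the examples above):

julia
result = retrieve(index, question)  # returns a RAGResult\ngenerate!(index, result)            # mutates `result` in place\nresult.final_answer                 # now populated by the generation step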

See more details and corresponding functions and types in src/Experimental/RAGTools/rag_interface.jl.

References

# PromptingTools.Experimental.RAGTools.build_indexFunction.
julia
build_index(\n    indexer::AbstractIndexBuilder, files_or_docs::Vector{<:AbstractString};\n    verbose::Integer = 1,\n    extras::Union{Nothing, AbstractVector} = nothing,\n    index_id = gensym("ChunkEmbeddingsIndex"),\n    chunker::AbstractChunker = indexer.chunker,\n    chunker_kwargs::NamedTuple = NamedTuple(),\n    embedder::AbstractEmbedder = indexer.embedder,\n    embedder_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = indexer.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    api_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Build an INDEX for RAG (Retrieval-Augmented Generation) applications from the provided file paths. INDEX is an object storing the document chunks and their embeddings (and potentially other information).

The function processes each file or document (depending on chunker), splits its content into chunks, embeds these chunks, optionally extracts metadata, and then combines this information into a retrievable index.

Define your own methods via indexer and its subcomponents (chunker, embedder, tagger).

Arguments

Returns

See also: ChunkEmbeddingsIndex, get_chunks, get_embeddings, get_tags, CandidateChunks, find_closest, find_tags, rerank, retrieve, generate!, airag

Examples

julia
# Default is loading a vector of strings and chunking them (`TextChunker()`)\nindex = build_index(SimpleIndexer(), texts; chunker_kwargs = (; max_length=10))\n\n# Another example with tags extraction, splitting only sentences and verbose output\n# Assuming `test_files` is a vector of file paths\nindexer = SimpleIndexer(chunker=FileChunker(), tagger=OpenTagger())\nindex = build_index(indexer, test_files; \n        chunker_kwargs = (; separators=[". "]), verbose=true)

Notes

source

julia
build_index(\n    indexer::KeywordsIndexer, files_or_docs::Vector{<:AbstractString};\n    verbose::Integer = 1,\n    extras::Union{Nothing, AbstractVector} = nothing,\n    index_id = gensym("ChunkKeywordsIndex"),\n    chunker::AbstractChunker = indexer.chunker,\n    chunker_kwargs::NamedTuple = NamedTuple(),\n    processor::AbstractProcessor = indexer.processor,\n    processor_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = indexer.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    api_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Builds a ChunkKeywordsIndex from the provided files or documents to support keyword-based search (BM25).

source


# PromptingTools.Experimental.RAGTools.airagFunction.
julia
airag(cfg::AbstractRAGConfig, index::AbstractDocumentIndex;\n    question::AbstractString,\n    verbose::Integer = 1, return_all::Bool = false,\n    api_kwargs::NamedTuple = NamedTuple(),\n    retriever::AbstractRetriever = cfg.retriever,\n    retriever_kwargs::NamedTuple = NamedTuple(),\n    generator::AbstractGenerator = cfg.generator,\n    generator_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

High-level wrapper for Retrieval-Augmented Generation (RAG): it combines the retrieve and generate! steps, which you can customize if needed.

The simplest version first finds the relevant chunks in index for the question and then sends these chunks to the AI model to help with generating a response to the question.

To customize the components, replace the types (retriever, generator) of the corresponding step of the RAG pipeline - or go into sub-routines within the steps. Eg, use subtypes(AbstractRetriever) to find the available options.

Arguments

Returns

See also build_index, retrieve, generate!, RAGResult, getpropertynested, setpropertynested, merge_kwargs_nested, ChunkKeywordsIndex.

Examples

Using airag to get a response for a question:

julia
index = build_index(...)  # create an index\nquestion = "How to make a barplot in Makie.jl?"\nmsg = airag(index; question)

To understand the details of the RAG process, use return_all=true

julia
msg, details = airag(index; question, return_all = true)\n# details is a RAGDetails object with all the internal steps of the `airag` function

You can also pretty-print details to highlight generated text vs text that is supported by context. It also includes annotations of which context was used for each part of the response (where available).

julia
PT.pprint(details)

Example with advanced retrieval (with question rephrasing and reranking; requires COHERE_API_KEY). We will obtain top 100 chunks from embeddings (top_k) and top 5 chunks from reranking (top_n). In addition, it will be done with a "custom" locally-hosted model.

julia
cfg = RAGConfig(; retriever = AdvancedRetriever())\n\n# kwargs will be big and nested, let's prepare them upfront\n# we specify "custom" model for each component that calls LLM\nkwargs = (\n    retriever_kwargs = (;\n        top_k = 100,\n        top_n = 5,\n        rephraser_kwargs = (;\n            model = "custom"),\n        embedder_kwargs = (;\n            model = "custom"),\n        tagger_kwargs = (;\n            model = "custom")),\n    generator_kwargs = (;\n        answerer_kwargs = (;\n            model = "custom"),\n        refiner_kwargs = (;\n            model = "custom")),\n    api_kwargs = (;\n        url = "http://localhost:8080"))\n\nresult = airag(cfg, index, question; kwargs...)

If you want to use hybrid retrieval (embeddings + BM25), you can easily create an additional index based on keywords and pass them both into a MultiIndex.

You need to provide an explicit config so that the pipeline knows how to handle each index in the similarity search step (finder).

julia
index = # your existing index\n\n# create the multi-index with the keywords index\nindex_keywords = ChunkKeywordsIndex(index)\nmulti_index = MultiIndex([index, index_keywords])\n\n# define the similarity measures for the indices that you have (same order)\nfinder = RT.MultiFinder([RT.CosineSimilarity(), RT.BM25Similarity()])\ncfg = RAGConfig(; retriever=AdvancedRetriever(; processor=RT.KeywordsProcessor(), finder))\n\n# Run the pipeline with the new hybrid retrieval (return the `RAGResult` to see the details)\nresult = airag(cfg, multi_index; question, return_all=true)\n\n# Pretty-print the result\nPT.pprint(result)

For easier manipulation of nested kwargs, see utilities getpropertynested, setpropertynested, merge_kwargs_nested.

source


# PromptingTools.Experimental.RAGTools.retrieveFunction.
julia
retrieve(retriever::AbstractRetriever,\n    index::AbstractChunkIndex,\n    question::AbstractString;\n    verbose::Integer = 1,\n    top_k::Integer = 100,\n    top_n::Integer = 5,\n    api_kwargs::NamedTuple = NamedTuple(),\n    rephraser::AbstractRephraser = retriever.rephraser,\n    rephraser_kwargs::NamedTuple = NamedTuple(),\n    embedder::AbstractEmbedder = retriever.embedder,\n    embedder_kwargs::NamedTuple = NamedTuple(),\n    processor::AbstractProcessor = retriever.processor,\n    processor_kwargs::NamedTuple = NamedTuple(),\n    finder::AbstractSimilarityFinder = retriever.finder,\n    finder_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = retriever.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    filter::AbstractTagFilter = retriever.filter,\n    filter_kwargs::NamedTuple = NamedTuple(),\n    reranker::AbstractReranker = retriever.reranker,\n    reranker_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Retrieves the most relevant chunks from the index for the given question and returns them in the RAGResult object.

This is the main entry point for the retrieval stage of the RAG pipeline. It is often followed by the generate! step.

Notes:

The arguments correspond to the steps of the retrieval process (rephrasing, embedding, finding similar docs, tagging, filtering by tags, reranking). You can customize each step by providing a new custom type that dispatches the corresponding function, eg, create your own type struct MyReranker<:AbstractReranker end and define the custom method for it rerank(::MyReranker,...) = ....

Note: Discover available retrieval sub-types for each step with subtypes(AbstractRephraser) and similar for other abstract types.

If you're using locally-hosted models, you can pass the api_kwargs with the url field set to the model's URL and make sure to provide corresponding model kwargs to rephraser, embedder, and tagger to use the custom models (they make AI calls).

Arguments

See also: SimpleRetriever, AdvancedRetriever, build_index, rephrase, get_embeddings, get_keywords, find_closest, get_tags, find_tags, rerank, RAGResult.

Examples

Find the 5 most relevant chunks from the index for the given question.

julia
# assumes you have an existing index `index`\nretriever = SimpleRetriever()\n\nresult = retrieve(retriever,\n    index,\n    "What is the capital of France?",\n    top_n = 5)\n\n# or use the default retriever (same as above)\nresult = retrieve(index,\n    "What is the capital of France?",\n    top_n = 5)

Apply more advanced retrieval with question rephrasing and reranking (requires COHERE_API_KEY). We will obtain top 100 chunks from embeddings (top_k) and top 5 chunks from reranking (top_n).

julia
retriever = AdvancedRetriever()\n\nresult = retrieve(retriever, index, question; top_k=100, top_n=5)

You can use the retriever to customize your retrieval strategy or directly change the strategy types in the retrieve kwargs!

Example of using locally-hosted model hosted on localhost:8080:

julia
retriever = SimpleRetriever()\nresult = retrieve(retriever, index, question;\n    rephraser_kwargs = (; model = "custom"),\n    embedder_kwargs = (; model = "custom"),\n    tagger_kwargs = (; model = "custom"), api_kwargs = (;\n        url = "http://localhost:8080"))

source


# PromptingTools.Experimental.RAGTools.generate!Function.
julia
generate!(\n    generator::AbstractGenerator, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    verbose::Integer = 1,\n    api_kwargs::NamedTuple = NamedTuple(),\n    contexter::AbstractContextBuilder = generator.contexter,\n    contexter_kwargs::NamedTuple = NamedTuple(),\n    answerer::AbstractAnswerer = generator.answerer,\n    answerer_kwargs::NamedTuple = NamedTuple(),\n    refiner::AbstractRefiner = generator.refiner,\n    refiner_kwargs::NamedTuple = NamedTuple(),\n    postprocessor::AbstractPostprocessor = generator.postprocessor,\n    postprocessor_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Generate the response using the provided generator, index, and result. It is the second step in the RAG pipeline (after retrieve).

Returns the mutated result with the result.final_answer and the full conversation saved in result.conversations[:final_answer].

Notes

Arguments

See also: retrieve, build_context!, ContextEnumerator, answer!, SimpleAnswerer, refine!, NoRefiner, SimpleRefiner, postprocess!, NoPostprocessor

Examples

julia
# Assume we already have `index`\n\nquestion = "What are the best practices for parallel computing in Julia?"\n\n# Retrieve the relevant chunks - returns RAGResult\nresult = retrieve(index, question)\n\n# Generate the answer using the default generator, mutates the same result\nresult = generate!(index, result)

source


# PromptingTools.Experimental.RAGTools.annotate_supportFunction.
julia
annotate_support(annotater::TrigramAnnotater, answer::AbstractString,\n    context::AbstractVector; min_score::Float64 = 0.5,\n    skip_trigrams::Bool = true, hashed::Bool = true,\n    sources::Union{Nothing, AbstractVector{<:AbstractString}} = nothing,\n    min_source_score::Float64 = 0.25,\n    add_sources::Bool = true,\n    add_scores::Bool = true, kwargs...)

Annotates the answer with what is supported by the context (the overlap) and returns an annotated tree of nodes representing the answer.

Returns a "root" node with children nodes representing the sentences/code blocks in the answer. Only the "leaf" nodes are to be printed (to avoid duplication), "leaf" nodes are those with NO children.

Default logic:

Arguments

Example

julia
annotater = TrigramAnnotater()\ncontext = [\n    "This is a test context.", "Another context sentence.", "Final piece of context."]\nanswer = "This is a test context. Another context sentence."\n\nannotated_root = annotate_support(annotater, answer, context)\npprint(annotated_root) # pretty print the annotated tree
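If you want to post-process the tree yourself (eg, re-assemble only the leaf contents), here is a minimal sketch; it assumes the annotated nodes expose children and content fields:

julia
function collect_leaves(node, acc = Any[])\n    isempty(node.children) ? push!(acc, node) : foreach(c -> collect_leaves(c, acc), node.children)\n    return acc\nend\nleaves = collect_leaves(annotated_root)\njoin(leaf.content for leaf in leaves)  # just the leaf contents, in order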

source

julia
annotate_support(\n    annotater::TrigramAnnotater, result::AbstractRAGResult; min_score::Float64 = 0.5,\n    skip_trigrams::Bool = true, hashed::Bool = true,\n    min_source_score::Float64 = 0.25,\n    add_sources::Bool = true,\n    add_scores::Bool = true, kwargs...)

Dispatch for annotate_support for AbstractRAGResult type. It extracts the final_answer and context from the result and calls annotate_support with them.

See annotate_support for more details.

Example

julia
res = RAGResult(; question = "", final_answer = "This is a test.",\n    context = ["Test context.", "Completely different"])\nannotated_root = annotate_support(annotater, res)\nPT.pprint(annotated_root)

source


# PromptingTools.Experimental.RAGTools.build_qa_evalsFunction.
julia
build_qa_evals(doc_chunks::Vector{<:AbstractString}, sources::Vector{<:AbstractString};\n               model=PT.MODEL_CHAT, instructions="None.", qa_template::Symbol=:RAGCreateQAFromContext, \n               verbose::Bool=true, api_kwargs::NamedTuple = NamedTuple(), kwargs...) -> Vector{QAEvalItem}

Create a collection of question and answer evaluations (QAEvalItem) from document chunks and sources. This function generates Q&A pairs based on the provided document chunks, using a specified AI model and template.

Arguments

Returns

Vector{QAEvalItem}: A vector of QAEvalItem structs, each containing a source, context, question, and answer. Invalid or empty items are filtered out.

Notes

Examples

Creating Q&A evaluations from a set of document chunks:

julia
doc_chunks = ["Text from document 1", "Text from document 2"]\nsources = ["source1", "source2"]\nqa_evals = build_qa_evals(doc_chunks, sources)

source


', 88) + createStaticVNode('

RAG Tools Introduction

RAGTools is an experimental module that provides a set of utilities for building Retrieval-Augmented Generation (RAG) applications, ie, applications that generate answers by combining knowledge of the underlying AI model with the information from the user's knowledge base.

It is designed to be powerful and flexible, allowing you to build RAG applications with minimal effort. Extend any step of the pipeline with your own custom code (see the RAG Interface section), or use the provided defaults to get started quickly.

Once the API stabilizes (near term), we hope to carve it out into a separate package.

Import the module as follows:

julia
# required dependencies to load the necessary extensions!!!\nusing LinearAlgebra, SparseArrays, Unicode, Snowball\nusing PromptingTools.Experimental.RAGTools\n# to access unexported functionality\nconst RT = PromptingTools.Experimental.RAGTools

Highlights

The main functions to be aware of are:

The hope is to provide a modular and easily extensible set of tools for building RAG applications in Julia. Feel free to open an issue or ask in the #generative-ai channel in the JuliaLang Slack if you have a specific need.

Examples

Let's build an index; we need to provide a starter list of documents:

julia
sentences = [\n    "Find the most comprehensive guide on Julia programming language for beginners published in 2023.",\n    "Search for the latest advancements in quantum computing using Julia language.",\n    "How to implement machine learning algorithms in Julia with examples.",\n    "Looking for performance comparison between Julia, Python, and R for data analysis.",\n    "Find Julia language tutorials focusing on high-performance scientific computing.",\n    "Search for the top Julia language packages for data visualization and their documentation.",\n    "How to set up a Julia development environment on Windows 10.",\n    "Discover the best practices for parallel computing in Julia.",\n    "Search for case studies of large-scale data processing using Julia.",\n    "Find comprehensive resources for mastering metaprogramming in Julia.",\n    "Looking for articles on the advantages of using Julia for statistical modeling.",\n    "How to contribute to the Julia open-source community: A step-by-step guide.",\n    "Find the comparison of numerical accuracy between Julia and MATLAB.",\n    "Looking for the latest Julia language updates and their impact on AI research.",\n    "How to efficiently handle big data with Julia: Techniques and libraries.",\n    "Discover how Julia integrates with other programming languages and tools.",\n    "Search for Julia-based frameworks for developing web applications.",\n    "Find tutorials on creating interactive dashboards with Julia.",\n    "How to use Julia for natural language processing and text analysis.",\n    "Discover the role of Julia in the future of computational finance and econometrics."\n]

Let's index these "documents":

julia
index = build_index(sentences; chunker_kwargs=(; sources=map(i -> "Doc$i", 1:length(sentences))))

This is equivalent to index = build_index(SimpleIndexer(), sentences), which dispatches to the default implementation of each step via the SimpleIndexer struct. These default implementations are provided to the main functions as an optional argument - no need to provide them if you're running the default pipeline.

Notice that we have provided a chunker_kwargs argument to the build_index function. These are the kwargs passed to the chunker step.

Now let's generate an answer to a question.

  1. Run end-to-end RAG (retrieve + generate!), return AIMessage
julia
question = "What are the best practices for parallel computing in Julia?"\n\nmsg = airag(index; question) # short for airag(RAGConfig(), index; question)\n## Output:\n## [ Info: Done with RAG. Total cost: \\$0.0\n## AIMessage("Some best practices for parallel computing in Julia include us...
  1. Explore what's happening under the hood by changing the return type - RAGResult contains all intermediate steps.
julia
result = airag(index; question, return_all=true)\n## RAGResult\n##   question: String "What are the best practices for parallel computing in Julia?"\n##   rephrased_questions: Array{String}((1,))\n##   answer: SubString{String}\n##   final_answer: SubString{String}\n##   context: Array{String}((5,))\n##   sources: Array{String}((5,))\n##   emb_candidates: CandidateChunks{Int64, Float32}\n##   tag_candidates: CandidateChunks{Int64, Float32}\n##   filtered_candidates: CandidateChunks{Int64, Float32}\n##   reranked_candidates: CandidateChunks{Int64, Float32}\n##   conversations: Dict{Symbol, Vector{<:PromptingTools.AbstractMessage}}

You can still get the message from the result; see result.conversations[:final_answer] (the dictionary keys correspond to the function names of those steps).
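For example, a minimal sketch of pulling out the final AI message from the result (assuming the assistant's answer is the last message of that conversation):

julia
conv = result.conversations[:final_answer]  # full conversation of the answering step\nmsg = last(conv)  # the AI-generated answer is typically the last message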

  1. If you need to customize it, break the pipeline into its sub-steps: retrieve and generate! - RAGResult serves as the intermediate result.
julia
# Retrieve which chunks are relevant to the question\nresult = retrieve(index, question)\n# Generate an answer\nresult = generate!(index, result)

You can leverage the pretty-printing system via pprint, which automatically annotates which parts of the answer are supported by the chunks we provided to the model. It is configurable, and you can select only some of its features (eg, scores, sources).

julia
pprint(result)

You'll see the following in the REPL, but with color highlighting in the terminal.

plaintext
--------------------\nQUESTION(s)\n--------------------\n- What are the best practices for parallel computing in Julia?\n\n--------------------\nANSWER\n--------------------\nSome of the best practices for parallel computing in Julia include:[1,0.7]\n- Using [3,0.4]`@threads` for simple parallelism[1,0.34]\n- Utilizing `Distributed` module for more complex parallel tasks[1,0.19]\n- Avoiding excessive memory allocation\n- Considering task granularity for efficient workload distribution\n\n--------------------\nSOURCES\n--------------------\n1. Doc8\n2. Doc15\n3. Doc5\n4. Doc2\n5. Doc9

See ?print_html for the HTML version of the pretty-printing and styling system, eg, when you want to display the results in a web application based on Genie.jl/Stipple.jl.

How to read the output

Want more?

See examples/building_RAG.jl for one more example.

RAG Interface

System Overview

This system is designed for information retrieval and response generation, structured in three main phases:

The corresponding functions are build_index, retrieve, and generate!, respectively. Here is the high-level diagram that shows the signature of the main functions:

Notice that the first argument is a custom type for multiple dispatch. In addition, observe the "kwargs" names - that's how the keyword arguments for each function are passed down from the higher-level functions (eg, build_index(...; chunker_kwargs=(; separators=...))). It's the simplest way to customize some step of the pipeline (eg, set a custom model with a model kwarg or prompt template with template kwarg).

The system is designed to be hackable and extensible at almost every entry point. If you want to customize the behavior of any step, you can do so by defining a new type and defining a new method for the step you're changing, eg,

julia
using PromptingTools.Experimental.RAGTools: rerank\n\nstruct MyReranker <: AbstractReranker end\nrerank(::MyReranker, index, candidates) = ...

And then you would set the retrieve step to use your custom MyReranker via the reranker kwarg, eg, retrieve(....; reranker = MyReranker()) (or customize the main dispatching AbstractRetriever struct).

The overarching principles are:

RAG Diagram

The main functions are:

Prepare your document index with build_index:

Run E2E RAG with airag:

Retrieve relevant chunks with retrieve:

Generate an answer from relevant chunks with generate!:

To discover the currently available implementations, use the subtypes function, eg, subtypes(AbstractReranker).
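For instance, a minimal sketch of listing the available rerankers (the exact list depends on your installed version; subtypes lives in InteractiveUtils outside the REPL):

julia
using InteractiveUtils: subtypes\nusing PromptingTools.Experimental.RAGTools\nsubtypes(PromptingTools.Experimental.RAGTools.AbstractReranker)  # lists the available reranker implementations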

Passing Keyword Arguments

If you need to pass keyword arguments, use the nested kwargs corresponding to the dispatch type names (eg, the rephrase step has a rephraser dispatch type and rephraser_kwargs for its keyword arguments).

For example:

julia
cfg = RAGConfig(; retriever = AdvancedRetriever())\n\n# kwargs will be big and nested, let's prepare them upfront\n# we specify "custom" model for each component that calls LLM\nkwargs = (\n    retriever = AdvancedRetriever(),\n    retriever_kwargs = (;\n        top_k = 100,\n        top_n = 5,\n        # notice that this is effectively: retriever_kwargs/rephraser_kwargs/template\n        rephraser_kwargs = (;\n            template = :RAGQueryHyDE,\n            model = "custom")),\n    generator_kwargs = (;\n        # pass kwargs to `answer!` step defined by the `answerer` -> we're setting `answerer_kwargs`\n        answerer_kwargs = (;\n            model = "custom")),\n    # api_kwargs can be shared across all components\n    api_kwargs = (;\n        url = "http://localhost:8080"))\n\nresult = airag(cfg, index, question; kwargs...)

If you were one level deeper in the pipeline, working with retriever directly, you would pass:

julia
retriever_kwargs = (;\n    top_k = 100,\n    top_n = 5,\n    # notice that this is effectively: rephraser_kwargs/template\n    rephraser_kwargs = (;\n      template = :RAGQueryHyDE,\n      model = "custom"),\n  # api_kwargs can be shared across all components\n  api_kwargs = (;\n      url = "http://localhost:8080"))\n\nresult = retrieve(AdvancedRetriever(), index, question; retriever_kwargs...)

And going even deeper, you would provide the rephraser_kwargs directly to the rephrase step, eg,

julia
rephrase(SimpleRephraser(), question; model="custom", template = :RAGQueryHyDE, api_kwargs = (; url = "http://localhost:8080"))

Deepdive

Preparation Phase:

Retrieval Phase:

Generation Phase:

Note that all generation steps mutate the RAGResult object.
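To illustrate the in-place behavior, a small sketch (reusing index and question from the examples above):

julia
result = retrieve(index, question)  # returns a RAGResult\ngenerate!(index, result)            # mutates `result` in place\nresult.final_answer                 # now populated by the generation step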

See more details and corresponding functions and types in src/Experimental/RAGTools/rag_interface.jl.

References

# PromptingTools.Experimental.RAGTools.build_indexFunction.
julia
build_index(\n    indexer::AbstractIndexBuilder, files_or_docs::Vector{<:AbstractString};\n    verbose::Integer = 1,\n    extras::Union{Nothing, AbstractVector} = nothing,\n    index_id = gensym("ChunkEmbeddingsIndex"),\n    chunker::AbstractChunker = indexer.chunker,\n    chunker_kwargs::NamedTuple = NamedTuple(),\n    embedder::AbstractEmbedder = indexer.embedder,\n    embedder_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = indexer.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    api_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Build an INDEX for RAG (Retrieval-Augmented Generation) applications from the provided file paths. INDEX is an object storing the document chunks and their embeddings (and potentially other information).

The function processes each file or document (depending on chunker), splits its content into chunks, embeds these chunks, optionally extracts metadata, and then combines this information into a retrievable index.

Define your own methods via indexer and its subcomponents (chunker, embedder, tagger).

Arguments

Returns

See also: ChunkEmbeddingsIndex, get_chunks, get_embeddings, get_tags, CandidateChunks, find_closest, find_tags, rerank, retrieve, generate!, airag

Examples

julia
# Default is loading a vector of strings and chunking them (`TextChunker()`)\nindex = build_index(SimpleIndexer(), texts; chunker_kwargs = (; max_length=10))\n\n# Another example with tags extraction, splitting only sentences and verbose output\n# Assuming `test_files` is a vector of file paths\nindexer = SimpleIndexer(chunker=FileChunker(), tagger=OpenTagger())\nindex = build_index(indexer, test_files; \n        chunker_kwargs = (; separators=[". "]), verbose=true)

Notes

source

julia
build_index(\n    indexer::KeywordsIndexer, files_or_docs::Vector{<:AbstractString};\n    verbose::Integer = 1,\n    extras::Union{Nothing, AbstractVector} = nothing,\n    index_id = gensym("ChunkKeywordsIndex"),\n    chunker::AbstractChunker = indexer.chunker,\n    chunker_kwargs::NamedTuple = NamedTuple(),\n    processor::AbstractProcessor = indexer.processor,\n    processor_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = indexer.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    api_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Builds a ChunkKeywordsIndex from the provided files or documents to support keyword-based search (BM25).

source


# PromptingTools.Experimental.RAGTools.airagFunction.
julia
airag(cfg::AbstractRAGConfig, index::AbstractDocumentIndex;\n    question::AbstractString,\n    verbose::Integer = 1, return_all::Bool = false,\n    api_kwargs::NamedTuple = NamedTuple(),\n    retriever::AbstractRetriever = cfg.retriever,\n    retriever_kwargs::NamedTuple = NamedTuple(),\n    generator::AbstractGenerator = cfg.generator,\n    generator_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

High-level wrapper for Retrieval-Augmented Generation (RAG): it combines the retrieve and generate! steps, which you can customize if needed.

The simplest version first finds the relevant chunks in index for the question and then sends these chunks to the AI model to help with generating a response to the question.

To customize the components, replace the types (retriever, generator) of the corresponding step of the RAG pipeline - or go into sub-routines within the steps. Eg, use subtypes(AbstractRetriever) to find the available options.

Arguments

Returns

See also build_index, retrieve, generate!, RAGResult, getpropertynested, setpropertynested, merge_kwargs_nested, ChunkKeywordsIndex.

Examples

Using airag to get a response for a question:

julia
index = build_index(...)  # create an index\nquestion = "How to make a barplot in Makie.jl?"\nmsg = airag(index; question)

To understand the details of the RAG process, use return_all=true

julia
msg, details = airag(index; question, return_all = true)\n# details is a RAGDetails object with all the internal steps of the `airag` function

You can also pretty-print details to highlight generated text vs text that is supported by context. It also includes annotations of which context was used for each part of the response (where available).

julia
PT.pprint(details)

Example with advanced retrieval (with question rephrasing and reranking; requires COHERE_API_KEY). We will obtain top 100 chunks from embeddings (top_k) and top 5 chunks from reranking (top_n). In addition, it will be done with a "custom" locally-hosted model.

julia
cfg = RAGConfig(; retriever = AdvancedRetriever())\n\n# kwargs will be big and nested, let's prepare them upfront\n# we specify "custom" model for each component that calls LLM\nkwargs = (\n    retriever_kwargs = (;\n        top_k = 100,\n        top_n = 5,\n        rephraser_kwargs = (;\n            model = "custom"),\n        embedder_kwargs = (;\n            model = "custom"),\n        tagger_kwargs = (;\n            model = "custom")),\n    generator_kwargs = (;\n        answerer_kwargs = (;\n            model = "custom"),\n        refiner_kwargs = (;\n            model = "custom")),\n    api_kwargs = (;\n        url = "http://localhost:8080"))\n\nresult = airag(cfg, index, question; kwargs...)

If you want to use hybrid retrieval (embeddings + BM25), you can easily create an additional index based on keywords and pass them both into a MultiIndex.

You need to provide an explicit config so that the pipeline knows how to handle each index in the similarity search step (finder).

julia
index = # your existing index\n\n# create the multi-index with the keywords index\nindex_keywords = ChunkKeywordsIndex(index)\nmulti_index = MultiIndex([index, index_keywords])\n\n# define the similarity measures for the indices that you have (same order)\nfinder = RT.MultiFinder([RT.CosineSimilarity(), RT.BM25Similarity()])\ncfg = RAGConfig(; retriever=AdvancedRetriever(; processor=RT.KeywordsProcessor(), finder))\n\n# Run the pipeline with the new hybrid retrieval (return the `RAGResult` to see the details)\nresult = airag(cfg, multi_index; question, return_all=true)\n\n# Pretty-print the result\nPT.pprint(result)

For easier manipulation of nested kwargs, see utilities getpropertynested, setpropertynested, merge_kwargs_nested.

source


# PromptingTools.Experimental.RAGTools.retrieveFunction.
julia
retrieve(retriever::AbstractRetriever,\n    index::AbstractChunkIndex,\n    question::AbstractString;\n    verbose::Integer = 1,\n    top_k::Integer = 100,\n    top_n::Integer = 5,\n    api_kwargs::NamedTuple = NamedTuple(),\n    rephraser::AbstractRephraser = retriever.rephraser,\n    rephraser_kwargs::NamedTuple = NamedTuple(),\n    embedder::AbstractEmbedder = retriever.embedder,\n    embedder_kwargs::NamedTuple = NamedTuple(),\n    processor::AbstractProcessor = retriever.processor,\n    processor_kwargs::NamedTuple = NamedTuple(),\n    finder::AbstractSimilarityFinder = retriever.finder,\n    finder_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = retriever.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    filter::AbstractTagFilter = retriever.filter,\n    filter_kwargs::NamedTuple = NamedTuple(),\n    reranker::AbstractReranker = retriever.reranker,\n    reranker_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Retrieves the most relevant chunks from the index for the given question and returns them in the RAGResult object.

This is the main entry point for the retrieval stage of the RAG pipeline. It is often followed by the generate! step.

Notes:

The arguments correspond to the steps of the retrieval process (rephrasing, embedding, finding similar docs, tagging, filtering by tags, reranking). You can customize each step by providing a new custom type that dispatches the corresponding function, eg, create your own type struct MyReranker<:AbstractReranker end and define the custom method for it rerank(::MyReranker,...) = ....

Note: Discover available retrieval sub-types for each step with subtypes(AbstractRephraser) and similar for other abstract types.

If you're using locally-hosted models, you can pass the api_kwargs with the url field set to the model's URL and make sure to provide corresponding model kwargs to rephraser, embedder, and tagger to use the custom models (they make AI calls).

Arguments

See also: SimpleRetriever, AdvancedRetriever, build_index, rephrase, get_embeddings, get_keywords, find_closest, get_tags, find_tags, rerank, RAGResult.

Examples

Find the 5 most relevant chunks from the index for the given question.

julia
# assumes you have an existing index `index`\nretriever = SimpleRetriever()\n\nresult = retrieve(retriever,\n    index,\n    "What is the capital of France?",\n    top_n = 5)\n\n# or use the default retriever (same as above)\nresult = retrieve(index,\n    "What is the capital of France?",\n    top_n = 5)

Apply more advanced retrieval with question rephrasing and reranking (requires COHERE_API_KEY). We will obtain top 100 chunks from embeddings (top_k) and top 5 chunks from reranking (top_n).

julia
retriever = AdvancedRetriever()\n\nresult = retrieve(retriever, index, question; top_k=100, top_n=5)

You can use the retriever to customize your retrieval strategy or directly change the strategy types in the retrieve kwargs!

Example of using locally-hosted model hosted on localhost:8080:

julia
retriever = SimpleRetriever()\nresult = retrieve(retriever, index, question;\n    rephraser_kwargs = (; model = "custom"),\n    embedder_kwargs = (; model = "custom"),\n    tagger_kwargs = (; model = "custom"), api_kwargs = (;\n        url = "http://localhost:8080"))

source


# PromptingTools.Experimental.RAGTools.generate!Function.
julia
generate!(\n    generator::AbstractGenerator, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    verbose::Integer = 1,\n    api_kwargs::NamedTuple = NamedTuple(),\n    contexter::AbstractContextBuilder = generator.contexter,\n    contexter_kwargs::NamedTuple = NamedTuple(),\n    answerer::AbstractAnswerer = generator.answerer,\n    answerer_kwargs::NamedTuple = NamedTuple(),\n    refiner::AbstractRefiner = generator.refiner,\n    refiner_kwargs::NamedTuple = NamedTuple(),\n    postprocessor::AbstractPostprocessor = generator.postprocessor,\n    postprocessor_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Generate the response using the provided generator, index, and result. It is the second step in the RAG pipeline (after retrieve).

Returns the mutated result with the result.final_answer and the full conversation saved in result.conversations[:final_answer].

Notes

Arguments

See also: retrieve, build_context!, ContextEnumerator, answer!, SimpleAnswerer, refine!, NoRefiner, SimpleRefiner, postprocess!, NoPostprocessor

Examples

julia
# Assume we already have `index`\n\nquestion = "What are the best practices for parallel computing in Julia?"\n\n# Retrieve the relevant chunks - returns RAGResult\nresult = retrieve(index, question)\n\n# Generate the answer using the default generator, mutates the same result\nresult = generate!(index, result)

source


# PromptingTools.Experimental.RAGTools.annotate_supportFunction.
julia
annotate_support(annotater::TrigramAnnotater, answer::AbstractString,\n    context::AbstractVector; min_score::Float64 = 0.5,\n    skip_trigrams::Bool = true, hashed::Bool = true,\n    sources::Union{Nothing, AbstractVector{<:AbstractString}} = nothing,\n    min_source_score::Float64 = 0.25,\n    add_sources::Bool = true,\n    add_scores::Bool = true, kwargs...)

Annotates the answer with what is supported by the context (the overlap) and returns an annotated tree of nodes representing the answer.

Returns a "root" node with children nodes representing the sentences/code blocks in the answer. Only the "leaf" nodes are to be printed (to avoid duplication), "leaf" nodes are those with NO children.

Default logic:

Arguments

Example

julia
annotater = TrigramAnnotater()\ncontext = [\n    "This is a test context.", "Another context sentence.", "Final piece of context."]\nanswer = "This is a test context. Another context sentence."\n\nannotated_root = annotate_support(annotater, answer, context)\npprint(annotated_root) # pretty print the annotated tree
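If you want to post-process the tree yourself (eg, re-assemble only the leaf contents), here is a minimal sketch; it assumes the annotated nodes expose children and content fields:

julia
function collect_leaves(node, acc = Any[])\n    isempty(node.children) ? push!(acc, node) : foreach(c -> collect_leaves(c, acc), node.children)\n    return acc\nend\nleaves = collect_leaves(annotated_root)\njoin(leaf.content for leaf in leaves)  # just the leaf contents, in order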

source

julia
annotate_support(\n    annotater::TrigramAnnotater, result::AbstractRAGResult; min_score::Float64 = 0.5,\n    skip_trigrams::Bool = true, hashed::Bool = true,\n    min_source_score::Float64 = 0.25,\n    add_sources::Bool = true,\n    add_scores::Bool = true, kwargs...)

Dispatch for annotate_support for AbstractRAGResult type. It extracts the final_answer and context from the result and calls annotate_support with them.

See annotate_support for more details.

Example

julia
res = RAGResult(; question = "", final_answer = "This is a test.",\n    context = ["Test context.", "Completely different"])\nannotated_root = annotate_support(annotater, res)\nPT.pprint(annotated_root)

source


# PromptingTools.Experimental.RAGTools.build_qa_evalsFunction.
julia
build_qa_evals(doc_chunks::Vector{<:AbstractString}, sources::Vector{<:AbstractString};\n               model=PT.MODEL_CHAT, instructions="None.", qa_template::Symbol=:RAGCreateQAFromContext, \n               verbose::Bool=true, api_kwargs::NamedTuple = NamedTuple(), kwargs...) -> Vector{QAEvalItem}

Create a collection of question and answer evaluations (QAEvalItem) from document chunks and sources. This function generates Q&A pairs based on the provided document chunks, using a specified AI model and template.

Arguments

Returns

Vector{QAEvalItem}: A vector of QAEvalItem structs, each containing a source, context, question, and answer. Invalid or empty items are filtered out.

Notes

Examples

Creating Q&A evaluations from a set of document chunks:

julia
doc_chunks = ["Text from document 1", "Text from document 2"]\nsources = ["source1", "source2"]\nqa_evals = build_qa_evals(doc_chunks, sources)

source


', 88) ])); } const rag_tools_intro = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render]]); diff --git a/previews/PR218/assets/extra_tools_text_utilities_intro.md.B_lqAVJR.js b/previews/PR218/assets/extra_tools_text_utilities_intro.md.Cls15k4M.js similarity index 98% rename from previews/PR218/assets/extra_tools_text_utilities_intro.md.B_lqAVJR.js rename to previews/PR218/assets/extra_tools_text_utilities_intro.md.Cls15k4M.js index 3eb412835..18ada5a65 100644 --- a/previews/PR218/assets/extra_tools_text_utilities_intro.md.B_lqAVJR.js +++ b/previews/PR218/assets/extra_tools_text_utilities_intro.md.Cls15k4M.js @@ -3,7 +3,7 @@ const __pageData = JSON.parse('{"title":"Text Utilities","description":"","front const _sfc_main = { name: "extra_tools/text_utilities_intro.md" }; function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { return openBlock(), createElementBlock("div", null, _cache[0] || (_cache[0] = [ - createStaticVNode('

Text Utilities

Working with Generative AI (and in particular with the text modality) requires a lot of text manipulation. PromptingTools.jl provides a set of utilities to make this process easier and more efficient.

Highlights

The main functions to be aware of are

You can import them simply via:

julia
using PromptingTools: recursive_splitter, replace_words, wrap_string, length_longest_common_subsequence, distance_longest_common_subsequence

There are many more (especially in the AgentTools and RAGTools experimental modules)!

RAGTools module contains the following text utilities:

Feel free to open an issue or ask in the #generative-ai channel in the JuliaLang Slack if you have a specific need.

References

# PromptingTools.recursive_splitterFunction.
julia
recursive_splitter(text::String; separator::String=" ", max_length::Int=35000) -> Vector{String}

Split a given string text into chunks of a specified maximum length max_length. This is particularly useful for splitting larger documents or texts into smaller segments, suitable for models or systems with smaller context windows.

There is a method for dispatching on multiple separators, recursive_splitter(text::String, separators::Vector{String}; max_length::Int=35000) -> Vector{String} that mimics the logic of Langchain's RecursiveCharacterTextSplitter.

Arguments

Returns

Vector{String}: A vector of strings, each representing a chunk of the original text that is smaller than or equal to max_length.

Notes

Examples

Splitting text with the default separator (" "):

julia
text = "Hello world. How are you?"\nchunks = recursive_splitter(text; max_length=13)\nlength(chunks) # Output: 2

Using a custom separator and custom max_length

julia
text = "Hello,World," ^ 2900 # length 34900 chars\nrecursive_splitter(text; separator=",", max_length=10000) # for 4K context window\nlength(chunks[1]) # Output: 4

source

julia
recursive_splitter(text::AbstractString, separators::Vector{String}; max_length::Int=35000) -> Vector{String}

Split a given string text into chunks recursively using a series of separators, with each chunk having a maximum length of max_length (if it's achievable given the separators provided). This function is useful for splitting large documents or texts into smaller segments that are more manageable for processing, particularly for models or systems with limited context windows.

It was previously known as split_by_length.

This is similar to Langchain's RecursiveCharacterTextSplitter. To achieve the same behavior, use separators=["\\n\\n", "\\n", " ", ""].

Arguments

Returns

Vector{String}: A vector of strings, where each string is a chunk of the original text that is smaller than or equal to max_length.

Usage Tips

How It Works

Examples

Splitting text using multiple separators:

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", ". ", "\\n"] # split by paragraphs, sentences, and newlines (not by words)\nchunks = recursive_splitter(text, separators, max_length=20)

Splitting text using multiple separators - with splitting on words:

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", ". ", "\\n", " "] # split by paragraphs, sentences, and newlines, words\nchunks = recursive_splitter(text, separators, max_length=10)

Using a single separator:

julia
text = "Hello,World," ^ 2900  # length 34900 characters\nchunks = recursive_splitter(text, [","], max_length=10000)

To achieve the same behavior as Langchain's RecursiveCharacterTextSplitter, use separators=["\\n\\n", "\\n", " ", ""].

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", "\\n", " ", ""]\nchunks = recursive_splitter(text, separators, max_length=10)

source


# PromptingTools.replace_wordsFunction.
julia
replace_words(text::AbstractString, words::Vector{<:AbstractString}; replacement::AbstractString="ABC")

Replace all occurrences of words in words with replacement in text. Useful to quickly remove specific names or entities from a text.

Arguments

Example

julia
text = "Disney is a great company"\nreplace_words(text, ["Disney", "Snow White", "Mickey Mouse"])\n# Output: "ABC is a great company"

source


# PromptingTools.wrap_stringFunction.
julia
wrap_string(str::String,\n    text_width::Int = 20;\n    newline::Union{AbstractString, AbstractChar} = '\\n')

Breaks a string into lines of a given text_width. Optionally, you can specify the newline character or string to use.

Example:

julia
wrap_string("Certainly, here's a function in Julia that will wrap a string according to the specifications:", 10) |> print

source


# PromptingTools.length_longest_common_subsequenceFunction.
julia
length_longest_common_subsequence(itr1::AbstractString, itr2::AbstractString)

Compute the length of the longest common subsequence between two string sequences (ie, the higher the number, the better the match).

Source: https://cn.julialang.org/LeetCode.jl/dev/democards/problems/problems/1143.longest-common-subsequence/

Arguments

Returns

The length of the longest common subsequence.

Examples

julia
text1 = "abc-abc----"\ntext2 = "___ab_c__abc"\nlongest_common_subsequence(text1, text2)\n# Output: 6 (-> "abcabc")

It can be used to fuzzy match strings and find the similarity between them (Tip: normalize the match)

julia
commands = ["product recommendation", "emotions", "specific product advice", "checkout advice"]\nquery = "Which product can you recommend for me?"\nlet pos = argmax(length_longest_common_subsequence.(Ref(query), commands))\n    dist = length_longest_common_subsequence(query, commands[pos])\n    norm = dist / min(length(query), length(commands[pos]))\n    @info "The closest command to the query: "$(query)" is: "$(commands[pos])" (distance: $(dist), normalized: $(norm))"\nend

But it might be easier to use directly the convenience wrapper distance_longest_common_subsequence!
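For example, a minimal sketch of the same command matching with the wrapper (distances are normalized to the 0-1 range, so the best match is the minimum):

julia
dists = distance_longest_common_subsequence(query, commands)  # one distance per command; 0.0 = perfect match\ncommands[argmin(dists)]  # the closest command, likely "product recommendation" here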

source


# PromptingTools.distance_longest_common_subsequenceFunction.
julia
distance_longest_common_subsequence(\n    input1::AbstractString, input2::AbstractString)\n\ndistance_longest_common_subsequence(\n    input1::AbstractString, input2::AbstractVector{<:AbstractString})

Measures distance between two strings using the length of the longest common subsequence (ie, the lower the number, the better the match). Perfect match is distance = 0.0

Convenience wrapper around length_longest_common_subsequence to normalize the distances to the 0-1 range. There is also a dispatch for comparing a string vs an array of strings.

Notes

Arguments

Example

You can also use it to find the closest context for some AI-generated summary/story:

julia
context = ["The enigmatic stranger vanished as swiftly as a wisp of smoke, leaving behind a trail of unanswered questions.",\n    "Beneath the shimmering moonlight, the ocean whispered secrets only the stars could hear.",\n    "The ancient tree stood as a silent guardian, its gnarled branches reaching for the heavens.",\n    "The melody danced through the air, painting a vibrant tapestry of emotions.",\n    "Time flowed like a relentless river, carrying away memories and leaving imprints in its wake."]\n\nstory = """\n    Beneath the shimmering moonlight, the ocean whispered secrets only the stars could hear.\n\n    Under the celestial tapestry, the vast ocean whispered its secrets to the indifferent stars. Each ripple, a murmured confidence, each wave, a whispered lament. The glittering celestial bodies listened in silent complicity, their enigmatic gaze reflecting the ocean's unspoken truths. The cosmic dance between the sea and the sky, a symphony of shared secrets, forever echoing in the ethereal expanse.\n    """\n\ndist = distance_longest_common_subsequence(story, context)\n@info "The closest context to the query: "$(first(story,20))..." is: "$(context[argmin(dist)])" (distance: $(minimum(dist)))"

source


', 20) + createStaticVNode('

Text Utilities

Working with Generative AI (and in particular with the text modality) requires a lot of text manipulation. PromptingTools.jl provides a set of utilities to make this process easier and more efficient.

Highlights

The main functions to be aware of are

You can import them simply via:

julia
using PromptingTools: recursive_splitter, replace_words, wrap_string, length_longest_common_subsequence, distance_longest_common_subsequence

There are many more (especially in the AgentTools and RAGTools experimental modules)!

RAGTools module contains the following text utilities:

Feel free to open an issue or ask in the #generative-ai channel in the JuliaLang Slack if you have a specific need.

References

# PromptingTools.recursive_splitterFunction.
julia
recursive_splitter(text::String; separator::String=" ", max_length::Int=35000) -> Vector{String}

Split a given string text into chunks of a specified maximum length max_length. This is particularly useful for splitting larger documents or texts into smaller segments, suitable for models or systems with smaller context windows.

There is a method for dispatching on multiple separators, recursive_splitter(text::String, separators::Vector{String}; max_length::Int=35000) -> Vector{String} that mimics the logic of Langchain's RecursiveCharacterTextSplitter.

Arguments

Returns

Vector{String}: A vector of strings, each representing a chunk of the original text that is smaller than or equal to max_length.

Notes

Examples

Splitting text with the default separator (" "):

julia
text = "Hello world. How are you?"\nchunks = recursive_splitter(text; max_length=13)\nlength(chunks) # Output: 2

Using a custom separator and custom max_length

julia
text = "Hello,World," ^ 2900 # length 34900 chars\nrecursive_splitter(text; separator=",", max_length=10000) # for 4K context window\nlength(chunks[1]) # Output: 4

source

julia
recursive_splitter(text::AbstractString, separators::Vector{String}; max_length::Int=35000) -> Vector{String}

Split a given string text into chunks recursively using a series of separators, with each chunk having a maximum length of max_length (if it's achievable given the separators provided). This function is useful for splitting large documents or texts into smaller segments that are more manageable for processing, particularly for models or systems with limited context windows.

It was previously known as split_by_length.

This is similar to Langchain's RecursiveCharacterTextSplitter. To achieve the same behavior, use separators=["\\n\\n", "\\n", " ", ""].

Arguments

Returns

Vector{String}: A vector of strings, where each string is a chunk of the original text that is smaller than or equal to max_length.

Usage Tips

How It Works

Examples

Splitting text using multiple separators:

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", ". ", "\\n"] # split by paragraphs, sentences, and newlines (not by words)\nchunks = recursive_splitter(text, separators, max_length=20)

Splitting text using multiple separators - with splitting on words:

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", ". ", "\\n", " "] # split by paragraphs, sentences, and newlines, words\nchunks = recursive_splitter(text, separators, max_length=10)

Using a single separator:

julia
text = "Hello,World," ^ 2900  # length 34900 characters\nchunks = recursive_splitter(text, [","], max_length=10000)

To achieve the same behavior as Langchain's RecursiveCharacterTextSplitter, use separators=["\\n\\n", "\\n", " ", ""].

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", "\\n", " ", ""]\nchunks = recursive_splitter(text, separators, max_length=10)

source


# PromptingTools.replace_wordsFunction.
julia
replace_words(text::AbstractString, words::Vector{<:AbstractString}; replacement::AbstractString="ABC")

Replace all occurrences of words in words with replacement in text. Useful to quickly remove specific names or entities from a text.

Arguments

Example

julia
text = "Disney is a great company"\nreplace_words(text, ["Disney", "Snow White", "Mickey Mouse"])\n# Output: "ABC is a great company"

source


# PromptingTools.wrap_stringFunction.
julia
wrap_string(str::String,\n    text_width::Int = 20;\n    newline::Union{AbstractString, AbstractChar} = '\\n')

Breaks a string into lines of a given text_width. Optionally, you can specify the newline character or string to use.

Example:

julia
wrap_string("Certainly, here's a function in Julia that will wrap a string according to the specifications:", 10) |> print

source


# PromptingTools.length_longest_common_subsequenceFunction.
julia
length_longest_common_subsequence(itr1::AbstractString, itr2::AbstractString)

Compute the length of the longest common subsequence between two string sequences (ie, the higher the number, the better the match).

Source: https://cn.julialang.org/LeetCode.jl/dev/democards/problems/problems/1143.longest-common-subsequence/

Arguments

Returns

The length of the longest common subsequence.

Examples

julia
text1 = "abc-abc----"\ntext2 = "___ab_c__abc"\nlongest_common_subsequence(text1, text2)\n# Output: 6 (-> "abcabc")

It can be used to fuzzy match strings and find the similarity between them (Tip: normalize the match)

julia
commands = ["product recommendation", "emotions", "specific product advice", "checkout advice"]\nquery = "Which product can you recommend for me?"\nlet pos = argmax(length_longest_common_subsequence.(Ref(query), commands))\n    dist = length_longest_common_subsequence(query, commands[pos])\n    norm = dist / min(length(query), length(commands[pos]))\n    @info "The closest command to the query: "$(query)" is: "$(commands[pos])" (distance: $(dist), normalized: $(norm))"\nend

But it might be easier to use directly the convenience wrapper distance_longest_common_subsequence!
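For example, a minimal sketch of the same command matching with the wrapper (distances are normalized to the 0-1 range, so the best match is the minimum):

julia
dists = distance_longest_common_subsequence(query, commands)  # one distance per command; 0.0 = perfect match\ncommands[argmin(dists)]  # the closest command, likely "product recommendation" here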

source


# PromptingTools.distance_longest_common_subsequenceFunction.
julia
distance_longest_common_subsequence(\n    input1::AbstractString, input2::AbstractString)\n\ndistance_longest_common_subsequence(\n    input1::AbstractString, input2::AbstractVector{<:AbstractString})

Measures distance between two strings using the length of the longest common subsequence (ie, the lower the number, the better the match). Perfect match is distance = 0.0

Convenience wrapper around length_longest_common_subsequence that normalizes the distances to the 0-1 range. There is also a dispatch for comparing a string against an array of strings.

Notes

Arguments

Example
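
For a quick sense of the scale, a minimal sketch comparing two arbitrary strings (the inputs are purely illustrative):

julia
# illustrative inputs; 0.0 means a perfect match, values near 1.0 mean almost no shared subsequence\ndist = distance_longest_common_subsequence("hello world", "hello earth")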

You can also use it to find the closest context for some AI generated summary/story:

julia
context = ["The enigmatic stranger vanished as swiftly as a wisp of smoke, leaving behind a trail of unanswered questions.",\n    "Beneath the shimmering moonlight, the ocean whispered secrets only the stars could hear.",\n    "The ancient tree stood as a silent guardian, its gnarled branches reaching for the heavens.",\n    "The melody danced through the air, painting a vibrant tapestry of emotions.",\n    "Time flowed like a relentless river, carrying away memories and leaving imprints in its wake."]\n\nstory = """\n    Beneath the shimmering moonlight, the ocean whispered secrets only the stars could hear.\n\n    Under the celestial tapestry, the vast ocean whispered its secrets to the indifferent stars. Each ripple, a murmured confidence, each wave, a whispered lament. The glittering celestial bodies listened in silent complicity, their enigmatic gaze reflecting the ocean's unspoken truths. The cosmic dance between the sea and the sky, a symphony of shared secrets, forever echoing in the ethereal expanse.\n    """\n\ndist = distance_longest_common_subsequence(story, context)\n@info "The closest context to the query: "$(first(story,20))..." is: "$(context[argmin(dist)])" (distance: $(minimum(dist)))"

source


', 20) ])); } const text_utilities_intro = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render]]); diff --git a/previews/PR218/assets/extra_tools_text_utilities_intro.md.B_lqAVJR.lean.js b/previews/PR218/assets/extra_tools_text_utilities_intro.md.Cls15k4M.lean.js similarity index 98% rename from previews/PR218/assets/extra_tools_text_utilities_intro.md.B_lqAVJR.lean.js rename to previews/PR218/assets/extra_tools_text_utilities_intro.md.Cls15k4M.lean.js index 3eb412835..18ada5a65 100644 --- a/previews/PR218/assets/extra_tools_text_utilities_intro.md.B_lqAVJR.lean.js +++ b/previews/PR218/assets/extra_tools_text_utilities_intro.md.Cls15k4M.lean.js @@ -3,7 +3,7 @@ const __pageData = JSON.parse('{"title":"Text Utilities","description":"","front const _sfc_main = { name: "extra_tools/text_utilities_intro.md" }; function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { return openBlock(), createElementBlock("div", null, _cache[0] || (_cache[0] = [ - createStaticVNode('

Text Utilities

Working with Generative AI (and in particular with the text modality) requires a lot of text manipulation. PromptingTools.jl provides a set of utilities to make this process easier and more efficient.

Highlights

The main functions to be aware of are

You can import them simply via:

julia
using PromptingTools: recursive_splitter, replace_words, wrap_string, length_longest_common_subsequence, distance_longest_common_subsequence

There are many more (especially in the AgentTools and RAGTools experimental modules)!

RAGTools module contains the following text utilities:

Feel free to open an issue or ask in the #generative-ai channel in the JuliaLang Slack if you have a specific need.

References

# PromptingTools.recursive_splitterFunction.
julia
recursive_splitter(text::String; separator::String=" ", max_length::Int=35000) -> Vector{String}

Split a given string text into chunks of a specified maximum length max_length. This is particularly useful for splitting larger documents or texts into smaller segments, suitable for models or systems with smaller context windows.

There is a method for dispatching on multiple separators, recursive_splitter(text::String, separators::Vector{String}; max_length::Int=35000) -> Vector{String} that mimics the logic of Langchain's RecursiveCharacterTextSplitter.

Arguments

Returns

Vector{String}: A vector of strings, each representing a chunk of the original text that is smaller than or equal to max_length.

Notes

Examples

Splitting text with the default separator (" "):

julia
text = "Hello world. How are you?"\nchunks = recursive_splitter(text; max_length=13)\nlength(chunks) # Output: 2

Using a custom separator and custom max_length

julia
text = "Hello,World," ^ 2900 # length 34900 chars\nrecursive_splitter(text; separator=",", max_length=10000) # for 4K context window\nlength(chunks[1]) # Output: 4

source

julia
recursive_splitter(text::AbstractString, separators::Vector{String}; max_length::Int=35000) -> Vector{String}

Split a given string text into chunks recursively using a series of separators, with each chunk having a maximum length of max_length (if it's achievable given the separators provided). This function is useful for splitting large documents or texts into smaller segments that are more manageable for processing, particularly for models or systems with limited context windows.

It was previously known as split_by_length.

This is similar to Langchain's RecursiveCharacterTextSplitter. To achieve the same behavior, use separators=["\\n\\n", "\\n", " ", ""].

Arguments

Returns

Vector{String}: A vector of strings, where each string is a chunk of the original text that is smaller than or equal to max_length.

Usage Tips

How It Works

Examples

Splitting text using multiple separators:

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", ". ", "\\n"] # split by paragraphs, sentences, and newlines (not by words)\nchunks = recursive_splitter(text, separators, max_length=20)

Splitting text using multiple separators - with splitting on words:

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", ". ", "\\n", " "] # split by paragraphs, sentences, and newlines, words\nchunks = recursive_splitter(text, separators, max_length=10)

Using a single separator:

julia
text = "Hello,World," ^ 2900  # length 34900 characters\nchunks = recursive_splitter(text, [","], max_length=10000)

To achieve the same behavior as Langchain's RecursiveCharacterTextSplitter, use separators=["\\n\\n", "\\n", " ", ""].

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", "\\n", " ", ""]\nchunks = recursive_splitter(text, separators, max_length=10)

source


# PromptingTools.replace_wordsFunction.
julia
replace_words(text::AbstractString, words::Vector{<:AbstractString}; replacement::AbstractString="ABC")

Replace all occurrences of words in words with replacement in text. Useful to quickly remove specific names or entities from a text.

Arguments

Example

julia
text = "Disney is a great company"\nreplace_words(text, ["Disney", "Snow White", "Mickey Mouse"])\n# Output: "ABC is a great company"

source


# PromptingTools.wrap_stringFunction.
julia
wrap_string(str::String,\n    text_width::Int = 20;\n    newline::Union{AbstractString, AbstractChar} = '\n')

Breaks a string into lines of a given text_width. Optionally, you can specify the newline character or string to use.

Example:

julia
wrap_string("Certainly, here's a function in Julia that will wrap a string according to the specifications:", 10) |> print

source


# PromptingTools.length_longest_common_subsequenceFunction.
julia
length_longest_common_subsequence(itr1::AbstractString, itr2::AbstractString)

Compute the length of the longest common subsequence between two string sequences (ie, the higher the number, the better the match).

Source: https://cn.julialang.org/LeetCode.jl/dev/democards/problems/problems/1143.longest-common-subsequence/

Arguments

Returns

The length of the longest common subsequence.

Examples

julia
text1 = "abc-abc----"\ntext2 = "___ab_c__abc"\nlongest_common_subsequence(text1, text2)\n# Output: 6 (-> "abcabc")

It can be used to fuzzy match strings and find the similarity between them (Tip: normalize the match)

julia
commands = ["product recommendation", "emotions", "specific product advice", "checkout advice"]\nquery = "Which product can you recommend for me?"\nlet pos = argmax(length_longest_common_subsequence.(Ref(query), commands))\n    dist = length_longest_common_subsequence(query, commands[pos])\n    norm = dist / min(length(query), length(commands[pos]))\n    @info "The closest command to the query: "$(query)" is: "$(commands[pos])" (distance: $(dist), normalized: $(norm))"\nend

But it might be easier to use the convenience wrapper distance_longest_common_subsequence directly!

\n\n[source](https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/utils.jl#L252-L288)\n\n</div>\n<br>\n<div style='border-width:1px; border-style:solid; border-color:black; padding: 1em; border-radius: 25px;'>\n<a id='PromptingTools.distance_longest_common_subsequence-extra_tools-text_utilities_intro' href='#PromptingTools.distance_longest_common_subsequence-extra_tools-text_utilities_intro'>#</a>&nbsp;<b><u>PromptingTools.distance_longest_common_subsequence</u></b> &mdash; <i>Function</i>.\n\n\n\n\n```julia\ndistance_longest_common_subsequence(\n    input1::AbstractString, input2::AbstractString)\n\ndistance_longest_common_subsequence(\n    input1::AbstractString, input2::AbstractVector{<:AbstractString})

Measures distance between two strings using the length of the longest common subsequence (ie, the lower the number, the better the match). Perfect match is distance = 0.0

Convenience wrapper around length_longest_common_subsequence that normalizes the distances to the 0-1 range. There is also a dispatch for comparing a string against an array of strings.

Notes

Arguments

Example

You can also use it to find the closest context for some AI generated summary/story:

julia
context = ["The enigmatic stranger vanished as swiftly as a wisp of smoke, leaving behind a trail of unanswered questions.",\n    "Beneath the shimmering moonlight, the ocean whispered secrets only the stars could hear.",\n    "The ancient tree stood as a silent guardian, its gnarled branches reaching for the heavens.",\n    "The melody danced through the air, painting a vibrant tapestry of emotions.",\n    "Time flowed like a relentless river, carrying away memories and leaving imprints in its wake."]\n\nstory = """\n    Beneath the shimmering moonlight, the ocean whispered secrets only the stars could hear.\n\n    Under the celestial tapestry, the vast ocean whispered its secrets to the indifferent stars. Each ripple, a murmured confidence, each wave, a whispered lament. The glittering celestial bodies listened in silent complicity, their enigmatic gaze reflecting the ocean's unspoken truths. The cosmic dance between the sea and the sky, a symphony of shared secrets, forever echoing in the ethereal expanse.\n    """\n\ndist = distance_longest_common_subsequence(story, context)\n@info "The closest context to the query: "$(first(story,20))..." is: "$(context[argmin(dist)])" (distance: $(minimum(dist)))"

source


', 20) + createStaticVNode('

Text Utilities

Working with Generative AI (and in particular with the text modality) requires a lot of text manipulation. PromptingTools.jl provides a set of utilities to make this process easier and more efficient.

Highlights

The main functions to be aware of are

You can import them simply via:

julia
using PromptingTools: recursive_splitter, replace_words, wrap_string, length_longest_common_subsequence, distance_longest_common_subsequence

There are many more (especially in the AgentTools and RAGTools experimental modules)!

RAGTools module contains the following text utilities:

Feel free to open an issue or ask in the #generative-ai channel in the JuliaLang Slack if you have a specific need.

References

# PromptingTools.recursive_splitterFunction.
julia
recursive_splitter(text::String; separator::String=" ", max_length::Int=35000) -> Vector{String}

Split a given string text into chunks of a specified maximum length max_length. This is particularly useful for splitting larger documents or texts into smaller segments, suitable for models or systems with smaller context windows.

There is a method for dispatching on multiple separators, recursive_splitter(text::String, separators::Vector{String}; max_length::Int=35000) -> Vector{String} that mimics the logic of Langchain's RecursiveCharacterTextSplitter.

Arguments

Returns

Vector{String}: A vector of strings, each representing a chunk of the original text that is smaller than or equal to max_length.

Notes

Examples

Splitting text with the default separator (" "):

julia
text = "Hello world. How are you?"\nchunks = recursive_splitter(text; max_length=13)\nlength(chunks) # Output: 2

Using a custom separator and custom max_length

julia
text = "Hello,World," ^ 2900 # length 34900 chars\nrecursive_splitter(text; separator=",", max_length=10000) # for 4K context window\nlength(chunks[1]) # Output: 4

source

julia
recursive_splitter(text::AbstractString, separators::Vector{String}; max_length::Int=35000) -> Vector{String}

Split a given string text into chunks recursively using a series of separators, with each chunk having a maximum length of max_length (if it's achievable given the separators provided). This function is useful for splitting large documents or texts into smaller segments that are more manageable for processing, particularly for models or systems with limited context windows.

It was previously known as split_by_length.

This is similar to Langchain's RecursiveCharacterTextSplitter. To achieve the same behavior, use separators=["\\n\\n", "\\n", " ", ""].

Arguments

Returns

Vector{String}: A vector of strings, where each string is a chunk of the original text that is smaller than or equal to max_length.

Usage Tips

How It Works

Examples

Splitting text using multiple separators:

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", ". ", "\\n"] # split by paragraphs, sentences, and newlines (not by words)\nchunks = recursive_splitter(text, separators, max_length=20)

Splitting text using multiple separators - with splitting on words:

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", ". ", "\\n", " "] # split by paragraphs, sentences, and newlines, words\nchunks = recursive_splitter(text, separators, max_length=10)

Using a single separator:

julia
text = "Hello,World," ^ 2900  # length 34900 characters\nchunks = recursive_splitter(text, [","], max_length=10000)

To achieve the same behavior as Langchain's RecursiveCharacterTextSplitter, use separators=["\\n\\n", "\\n", " ", ""].

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", "\\n", " ", ""]\nchunks = recursive_splitter(text, separators, max_length=10)

source


# PromptingTools.replace_wordsFunction.
julia
replace_words(text::AbstractString, words::Vector{<:AbstractString}; replacement::AbstractString="ABC")

Replace all occurrences of words in words with replacement in text. Useful to quickly remove specific names or entities from a text.

Arguments

Example

julia
text = "Disney is a great company"\nreplace_words(text, ["Disney", "Snow White", "Mickey Mouse"])\n# Output: "ABC is a great company"

source


# PromptingTools.wrap_stringFunction.
julia
wrap_string(str::String,\n    text_width::Int = 20;\n    newline::Union{AbstractString, AbstractChar} = '\n')

Breaks a string into lines of a given text_width. Optionally, you can specify the newline character or string to use.

Example:

julia
wrap_string("Certainly, here's a function in Julia that will wrap a string according to the specifications:", 10) |> print

source


# PromptingTools.length_longest_common_subsequenceFunction.
julia
length_longest_common_subsequence(itr1::AbstractString, itr2::AbstractString)

Compute the length of the longest common subsequence between two string sequences (ie, the higher the number, the better the match).

Source: https://cn.julialang.org/LeetCode.jl/dev/democards/problems/problems/1143.longest-common-subsequence/

Arguments

Returns

The length of the longest common subsequence.

Examples

julia
text1 = "abc-abc----"\ntext2 = "___ab_c__abc"\nlongest_common_subsequence(text1, text2)\n# Output: 6 (-> "abcabc")

It can be used to fuzzy match strings and find the similarity between them (Tip: normalize the match)

julia
commands = ["product recommendation", "emotions", "specific product advice", "checkout advice"]\nquery = "Which product can you recommend for me?"\nlet pos = argmax(length_longest_common_subsequence.(Ref(query), commands))\n    dist = length_longest_common_subsequence(query, commands[pos])\n    norm = dist / min(length(query), length(commands[pos]))\n    @info "The closest command to the query: "$(query)" is: "$(commands[pos])" (distance: $(dist), normalized: $(norm))"\nend

But it might be easier to use the convenience wrapper distance_longest_common_subsequence directly!

\n\n[source](https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/utils.jl#L252-L288)\n\n</div>\n<br>\n<div style='border-width:1px; border-style:solid; border-color:black; padding: 1em; border-radius: 25px;'>\n<a id='PromptingTools.distance_longest_common_subsequence-extra_tools-text_utilities_intro' href='#PromptingTools.distance_longest_common_subsequence-extra_tools-text_utilities_intro'>#</a>&nbsp;<b><u>PromptingTools.distance_longest_common_subsequence</u></b> &mdash; <i>Function</i>.\n\n\n\n\n```julia\ndistance_longest_common_subsequence(\n    input1::AbstractString, input2::AbstractString)\n\ndistance_longest_common_subsequence(\n    input1::AbstractString, input2::AbstractVector{<:AbstractString})

Measures distance between two strings using the length of the longest common subsequence (ie, the lower the number, the better the match). Perfect match is distance = 0.0

Convenience wrapper around length_longest_common_subsequence that normalizes the distances to the 0-1 range. There is also a dispatch for comparing a string against an array of strings.

Notes

Arguments

Example

You can also use it to find the closest context for some AI generated summary/story:

julia
context = ["The enigmatic stranger vanished as swiftly as a wisp of smoke, leaving behind a trail of unanswered questions.",\n    "Beneath the shimmering moonlight, the ocean whispered secrets only the stars could hear.",\n    "The ancient tree stood as a silent guardian, its gnarled branches reaching for the heavens.",\n    "The melody danced through the air, painting a vibrant tapestry of emotions.",\n    "Time flowed like a relentless river, carrying away memories and leaving imprints in its wake."]\n\nstory = """\n    Beneath the shimmering moonlight, the ocean whispered secrets only the stars could hear.\n\n    Under the celestial tapestry, the vast ocean whispered its secrets to the indifferent stars. Each ripple, a murmured confidence, each wave, a whispered lament. The glittering celestial bodies listened in silent complicity, their enigmatic gaze reflecting the ocean's unspoken truths. The cosmic dance between the sea and the sky, a symphony of shared secrets, forever echoing in the ethereal expanse.\n    """\n\ndist = distance_longest_common_subsequence(story, context)\n@info "The closest context to the query: "$(first(story,20))..." is: "$(context[argmin(dist)])" (distance: $(minimum(dist)))"

source


', 20) ])); } const text_utilities_intro = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render]]); diff --git a/previews/PR218/assets/reference.md.DMO0NW9D.js b/previews/PR218/assets/reference.md.Bl4MWKuL.js similarity index 96% rename from previews/PR218/assets/reference.md.DMO0NW9D.js rename to previews/PR218/assets/reference.md.Bl4MWKuL.js index 44ef1f481..061682a66 100644 --- a/previews/PR218/assets/reference.md.DMO0NW9D.js +++ b/previews/PR218/assets/reference.md.Bl4MWKuL.js @@ -12,7 +12,7 @@ const _hoisted_8 = { style: { "border-width": "1px", "border-style": "solid", "b const _hoisted_9 = { style: { "border-width": "1px", "border-style": "solid", "border-color": "black", "padding": "1em", "border-radius": "25px" } }; function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { return openBlock(), createElementBlock("div", null, [ - _cache[47] || (_cache[47] = createStaticVNode('

Reference

# PromptingTools.ALLOWED_PREFERENCESConstant.

Keys that are allowed to be set via set_preferences!

source


# PromptingTools.ALTERNATIVE_GENERATION_COSTSConstant.
julia
ALTERNATIVE_GENERATION_COSTS

Tracker of alternative costing models, eg, for image generation (dall-e-3), the cost is driven by quality/size.

source


# PromptingTools.ANTHROPIC_TOOL_PROMPTConstant.

Simple template to add to the System Message when doing data extraction with Anthropic models.

It has three placeholders: tool_name, tool_description and tool_parameters that are filled with the tool's name, description and parameters. Source: https://docs.anthropic.com/claude/docs/functions-external-tools

source


# PromptingTools.CONV_HISTORYConstant.
julia
CONV_HISTORY

Tracks the most recent conversations through the ai_str macros.

Preference available: MAX_HISTORY_LENGTH, which sets how many last messages should be remembered.

See also: push_conversation!, resize_conversation!
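
A hedged sketch of how the history accumulates (assumes a configured API key; the prompts are illustrative):

julia
using PromptingTools\nconst PT = PromptingTools\nai"What is the capital of France?"   # each ai"..." call is recorded in PT.CONV_HISTORY\nai!"And what about Spain?"           # ai!"..." continues the latest conversation\nlength(PT.CONV_HISTORY)              # number of remembered conversations (capped by MAX_HISTORY_LENGTH)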

source


# PromptingTools.MODEL_ALIASESConstant.
julia
MODEL_ALIASES

A dictionary of model aliases. Aliases let you refer to models by short names instead of their full names, which makes them more convenient to use.

Accessing the aliases

PromptingTools.MODEL_ALIASES["gpt3"]

Register a new model alias

julia
PromptingTools.MODEL_ALIASES["gpt3"] = "gpt-3.5-turbo"

source


# PromptingTools.MODEL_REGISTRYConstant.
julia
MODEL_REGISTRY

A store of available model names and their specs (ie, name, costs per token, etc.)

Accessing the registry

You can use both the alias name or the full name to access the model spec:

PromptingTools.MODEL_REGISTRY["gpt-3.5-turbo"]

Registering a new model

julia
register_model!(\n    name = "gpt-3.5-turbo",\n    schema = :OpenAISchema,\n    cost_of_token_prompt = 0.0015,\n    cost_of_token_generation = 0.002,\n    description = "GPT-3.5 Turbo is a 175B parameter model and a common default on the OpenAI API.")

Registering a model alias

julia
PromptingTools.MODEL_ALIASES["gpt3"] = "gpt-3.5-turbo"

source


# PromptingTools.OPENAI_TOKEN_IDS_GPT35_GPT4Constant.

Token IDs for GPT3.5 and GPT4 from https://platform.openai.com/tokenizer

source


# PromptingTools.PREFERENCESConstant.
julia
PREFERENCES

You can set preferences for PromptingTools by setting environment variables or by using set_preferences!. It will create a LocalPreferences.toml file in your current directory and will reload your preferences from there.

Check your preferences by calling get_preferences(key::String).
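
For instance, a minimal sketch of setting and reading back a preference (assuming "MODEL_CHAT" is among the allowed keys):

julia
using PromptingTools\n# assumes "MODEL_CHAT" is an allowed preference key; writes LocalPreferences.toml in the current directory\nPromptingTools.set_preferences!("MODEL_CHAT" => "gpt-4o-mini")\nPromptingTools.get_preferences("MODEL_CHAT") # Output: "gpt-4o-mini"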

Available Preferences (for set_preferences!)

At the moment it is not possible to persist changes to MODEL_REGISTRY across sessions. Define your register_model!() calls in your startup.jl file to make them available across sessions or put them at the top of your script.

Available ENV Variables

Preferences.jl takes priority over ENV variables, so if you set a preference, it will take precedence over the ENV variable.

WARNING: NEVER EVER sync your LocalPreferences.toml file! It contains your API key and other sensitive information!!!

source


# PromptingTools.RESERVED_KWARGSConstant.

The following keywords are reserved for internal use in the ai* functions and cannot be used as placeholders in the Messages

source


# PromptingTools.AICodeType.
julia
AICode(code::AbstractString; auto_eval::Bool=true, safe_eval::Bool=false, \nskip_unsafe::Bool=false, capture_stdout::Bool=true, verbose::Bool=false,\nprefix::AbstractString="", suffix::AbstractString="", remove_tests::Bool=false, execution_timeout::Int = 60)\n\nAICode(msg::AIMessage; auto_eval::Bool=true, safe_eval::Bool=false, \nskip_unsafe::Bool=false, skip_invalid::Bool=false, capture_stdout::Bool=true,\nverbose::Bool=false, prefix::AbstractString="", suffix::AbstractString="", remove_tests::Bool=false, execution_timeout::Int = 60)

A mutable structure representing a code block (received from the AI model) with automatic parsing, execution, and output/error capturing capabilities.

Upon instantiation with a string, the AICode object automatically runs a code parser and executor (via PromptingTools.eval!()), capturing any standard output (stdout) or errors. This structure is useful for programmatically handling and evaluating Julia code snippets.

See also: PromptingTools.extract_code_blocks, PromptingTools.eval!

Workflow

Properties

Keyword Arguments

Methods

Examples

julia
code = AICode("println("Hello, World!")") # Auto-parses and evaluates the code, capturing output and errors.\nisvalid(code) # Output: true\ncode.stdout # Output: "Hello, World!\n"

We try to evaluate "safely" by default (eg, inside a custom module, to avoid changing user variables). You can avoid that with safe_eval=false:

julia
code = AICode("new_variable = 1"; safe_eval=false)\nisvalid(code) # Output: true\nnew_variable # Output: 1

You can also call AICode directly on an AIMessage, which will extract the Julia code blocks, concatenate them and evaluate them:

julia
msg = aigenerate("In Julia, how do you create a vector of 10 random numbers?")\ncode = AICode(msg)\n# Output: AICode(Success: True, Parsed: True, Evaluated: True, Error Caught: N/A, StdOut: True, Code: 2 Lines)\n\n# show the code\ncode.code |> println\n# Output: \n# numbers = rand(10)\n# numbers = rand(1:100, 10)\n\n# or copy it to the clipboard\ncode.code |> clipboard\n\n# or execute it in the current module (=Main)\neval(code.expression)

source


# PromptingTools.AIMessageType.
julia
AIMessage

A message type for AI-generated text-based responses. Returned by aigenerate, aiclassify, and aiscan functions.
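
A hedged sketch of accessing commonly used fields on the returned message (requires a configured API key; field names as commonly present):

julia
msg = aigenerate("Say hi!")\nmsg.content   # the generated text\nmsg.tokens    # (prompt_tokens, completion_tokens), if reported by the API\nmsg.status    # status code of the underlying API call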

Fields

source


# PromptingTools.AITemplateType.
julia
AITemplate

AITemplate is a template for a conversation prompt. This type is merely a container for the template name, which is resolved into a set of messages (=prompt) by render.

Naming Convention

Examples

Save time by re-using pre-made templates, just fill in the placeholders with the keyword arguments:

julia
msg = aigenerate(:JuliaExpertAsk; ask = "How do I add packages?")

The above is equivalent to a more verbose version that explicitly uses the dispatch on AITemplate:

julia
msg = aigenerate(AITemplate(:JuliaExpertAsk); ask = "How do I add packages?")

Find available templates with aitemplates:

julia
tmps = aitemplates("JuliaExpertAsk")\n# Will surface one specific template\n# 1-element Vector{AITemplateMetadata}:\n# PromptingTools.AITemplateMetadata\n#   name: Symbol JuliaExpertAsk\n#   description: String "For asking questions about Julia language. Placeholders: `ask`"\n#   version: String "1"\n#   wordcount: Int64 237\n#   variables: Array{Symbol}((1,))\n#   system_preview: String "You are a world-class Julia language programmer with the knowledge of the latest syntax. Your commun"\n#   user_preview: String "# Question\n\n{{ask}}"\n#   source: String ""

The above gives you a good idea of what the template is about, what placeholders are available, and how much it would cost to use it (=wordcount).

Search for all Julia-related templates:

julia
tmps = aitemplates("Julia")\n# 2-element Vector{AITemplateMetadata}... -> more to come later!

If you are on VSCode, you can leverage nice tabular display with vscodedisplay:

julia
using DataFrames\ntmps = aitemplates("Julia") |> DataFrame |> vscodedisplay

I have my selected template, how do I use it? Just use the "name" in aigenerate or aiclassify like you see in the first example!

You can inspect any template by "rendering" it (this is what the LLM will see):

julia
julia> AITemplate(:JudgeIsItTrue) |> PromptingTools.render

See also: save_template, load_template, load_templates! for more advanced use cases (and the corresponding script in examples/ folder)

source


# PromptingTools.AITemplateMetadataType.

Helper for easy searching and reviewing of templates. Defined on loading of each template.

source


# PromptingTools.AbstractPromptSchemaType.

Defines different prompting styles based on the model training and fine-tuning.

source


# PromptingTools.AbstractToolType.
julia
AbstractTool

Abstract type for all tool types.

Required fields:

source


', 32)), + _cache[47] || (_cache[47] = createStaticVNode('

Reference

# PromptingTools.ALLOWED_PREFERENCESConstant.

Keys that are allowed to be set via set_preferences!

source


# PromptingTools.ALTERNATIVE_GENERATION_COSTSConstant.
julia
ALTERNATIVE_GENERATION_COSTS

Tracker of alternative costing models, eg, for image generation (dall-e-3), the cost is driven by quality/size.

source


# PromptingTools.ANTHROPIC_TOOL_PROMPTConstant.

Simple template to add to the System Message when doing data extraction with Anthropic models.

It has three placeholders: tool_name, tool_description and tool_parameters that are filled with the tool's name, description and parameters. Source: https://docs.anthropic.com/claude/docs/functions-external-tools

source


# PromptingTools.CONV_HISTORYConstant.
julia
CONV_HISTORY

Tracks the most recent conversations through the ai_str macros.

Preference available: MAX_HISTORY_LENGTH, which sets how many last messages should be remembered.

See also: push_conversation!, resize_conversation!

source


# PromptingTools.MODEL_ALIASESConstant.
julia
MODEL_ALIASES

A dictionary of model aliases. Aliases let you refer to models by short names instead of their full names, which makes them more convenient to use.

Accessing the aliases

PromptingTools.MODEL_ALIASES["gpt3"]

Register a new model alias

julia
PromptingTools.MODEL_ALIASES["gpt3"] = "gpt-3.5-turbo"

source


# PromptingTools.MODEL_REGISTRYConstant.
julia
MODEL_REGISTRY

A store of available model names and their specs (ie, name, costs per token, etc.)

Accessing the registry

You can use both the alias name or the full name to access the model spec:

PromptingTools.MODEL_REGISTRY["gpt-3.5-turbo"]

Registering a new model

julia
register_model!(\n    name = "gpt-3.5-turbo",\n    schema = :OpenAISchema,\n    cost_of_token_prompt = 0.0015,\n    cost_of_token_generation = 0.002,\n    description = "GPT-3.5 Turbo is a 175B parameter model and a common default on the OpenAI API.")

Registering a model alias

julia
PromptingTools.MODEL_ALIASES["gpt3"] = "gpt-3.5-turbo"

source


# PromptingTools.OPENAI_TOKEN_IDS_GPT35_GPT4Constant.

Token IDs for GPT3.5 and GPT4 from https://platform.openai.com/tokenizer

source


# PromptingTools.PREFERENCESConstant.
julia
PREFERENCES

You can set preferences for PromptingTools by setting environment variables or by using set_preferences!. It will create a LocalPreferences.toml file in your current directory and will reload your preferences from there.

Check your preferences by calling get_preferences(key::String).

Available Preferences (for set_preferences!)

At the moment it is not possible to persist changes to MODEL_REGISTRY across sessions. Define your register_model!() calls in your startup.jl file to make them available across sessions or put them at the top of your script.

Available ENV Variables

Preferences.jl takes priority over ENV variables, so if you set a preference, it will take precedence over the ENV variable.

WARNING: NEVER EVER sync your LocalPreferences.toml file! It contains your API key and other sensitive information!!!

source


# PromptingTools.RESERVED_KWARGSConstant.

The following keywords are reserved for internal use in the ai* functions and cannot be used as placeholders in the Messages

source


# PromptingTools.AICodeType.
julia
AICode(code::AbstractString; auto_eval::Bool=true, safe_eval::Bool=false, \nskip_unsafe::Bool=false, capture_stdout::Bool=true, verbose::Bool=false,\nprefix::AbstractString="", suffix::AbstractString="", remove_tests::Bool=false, execution_timeout::Int = 60)\n\nAICode(msg::AIMessage; auto_eval::Bool=true, safe_eval::Bool=false, \nskip_unsafe::Bool=false, skip_invalid::Bool=false, capture_stdout::Bool=true,\nverbose::Bool=false, prefix::AbstractString="", suffix::AbstractString="", remove_tests::Bool=false, execution_timeout::Int = 60)

A mutable structure representing a code block (received from the AI model) with automatic parsing, execution, and output/error capturing capabilities.

Upon instantiation with a string, the AICode object automatically runs a code parser and executor (via PromptingTools.eval!()), capturing any standard output (stdout) or errors. This structure is useful for programmatically handling and evaluating Julia code snippets.

See also: PromptingTools.extract_code_blocks, PromptingTools.eval!

Workflow

Properties

Keyword Arguments

Methods

Examples

julia
code = AICode("println("Hello, World!")") # Auto-parses and evaluates the code, capturing output and errors.\nisvalid(code) # Output: true\ncode.stdout # Output: "Hello, World!\n"

We try to evaluate "safely" by default (eg, inside a custom module, to avoid changing user variables). You can avoid that with safe_eval=false:

julia
code = AICode("new_variable = 1"; safe_eval=false)\nisvalid(code) # Output: true\nnew_variable # Output: 1

You can also call AICode directly on an AIMessage, which will extract the Julia code blocks, concatenate them and evaluate them:

julia
msg = aigenerate("In Julia, how do you create a vector of 10 random numbers?")\ncode = AICode(msg)\n# Output: AICode(Success: True, Parsed: True, Evaluated: True, Error Caught: N/A, StdOut: True, Code: 2 Lines)\n\n# show the code\ncode.code |> println\n# Output: \n# numbers = rand(10)\n# numbers = rand(1:100, 10)\n\n# or copy it to the clipboard\ncode.code |> clipboard\n\n# or execute it in the current module (=Main)\neval(code.expression)

source


# PromptingTools.AIMessageType.
julia
AIMessage

A message type for AI-generated text-based responses. Returned by aigenerate, aiclassify, and aiscan functions.

Fields

source


# PromptingTools.AITemplateType.
julia
AITemplate

AITemplate is a template for a conversation prompt. This type is merely a container for the template name, which is resolved into a set of messages (=prompt) by render.

Naming Convention

Examples

Save time by re-using pre-made templates, just fill in the placeholders with the keyword arguments:

julia
msg = aigenerate(:JuliaExpertAsk; ask = "How do I add packages?")

The above is equivalent to a more verbose version that explicitly uses the dispatch on AITemplate:

julia
msg = aigenerate(AITemplate(:JuliaExpertAsk); ask = "How do I add packages?")

Find available templates with aitemplates:

julia
tmps = aitemplates("JuliaExpertAsk")\n# Will surface one specific template\n# 1-element Vector{AITemplateMetadata}:\n# PromptingTools.AITemplateMetadata\n#   name: Symbol JuliaExpertAsk\n#   description: String "For asking questions about Julia language. Placeholders: `ask`"\n#   version: String "1"\n#   wordcount: Int64 237\n#   variables: Array{Symbol}((1,))\n#   system_preview: String "You are a world-class Julia language programmer with the knowledge of the latest syntax. Your commun"\n#   user_preview: String "# Question\n\n{{ask}}"\n#   source: String ""

The above gives you a good idea of what the template is about, what placeholders are available, and how much it would cost to use it (=wordcount).

Search for all Julia-related templates:

julia
tmps = aitemplates("Julia")\n# 2-element Vector{AITemplateMetadata}... -> more to come later!

If you are on VSCode, you can leverage nice tabular display with vscodedisplay:

julia
using DataFrames\ntmps = aitemplates("Julia") |> DataFrame |> vscodedisplay

I have my selected template, how do I use it? Just use the "name" in aigenerate or aiclassify like you see in the first example!

You can inspect any template by "rendering" it (this is what the LLM will see):

julia
julia> AITemplate(:JudgeIsItTrue) |> PromptingTools.render

See also: save_template, load_template, load_templates! for more advanced use cases (and the corresponding script in examples/ folder)

source


# PromptingTools.AITemplateMetadataType.

Helper for easy searching and reviewing of templates. Defined on loading of each template.

source


# PromptingTools.AbstractPromptSchemaType.

Defines different prompting styles based on the model training and fine-tuning.

source


# PromptingTools.AbstractToolType.
julia
AbstractTool

Abstract type for all tool types.

Required fields:

source


', 32)), createBaseVNode("div", _hoisted_1, [ _cache[4] || (_cache[4] = createStaticVNode('# PromptingTools.AnthropicSchemaType.
julia
AnthropicSchema <: AbstractAnthropicSchema

AnthropicSchema is the default schema for Anthropic API models (eg, Claude). See more information here.

It uses the following conversation template:

Dict(role="user",content="..."),Dict(role="assistant",content="...")]

system messages are provided as a keyword argument to the API call.

', 11)), createBaseVNode("p", null, [ @@ -28,13 +28,13 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { ]), _cache[5] || (_cache[5] = createBaseVNode("p", null, [ createBaseVNode("a", { - href: "https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/llm_interface.jl#L327-L341", + href: "https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/llm_interface.jl#L327-L341", target: "_blank", rel: "noreferrer" }, "source") ], -1)) ]), - _cache[48] || (_cache[48] = createStaticVNode('
# PromptingTools.AzureOpenAISchemaType.

AzureOpenAISchema

AzureOpenAISchema() allows user to call Azure OpenAI API. API Reference

Requires two environment variables to be set:

source


# PromptingTools.CerebrasOpenAISchemaType.
julia
CerebrasOpenAISchema

Schema to call the Cerebras API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.ChatMLSchemaType.

ChatMLSchema is used by many open-source chatbots, by OpenAI models (under the hood) and by several models and interfaces (eg, Ollama, vLLM)

You can explore it on tiktokenizer

It uses the following conversation structure:

<im_start>system\n...<im_end>\n<|im_start|>user\n...<|im_end|>\n<|im_start|>assistant\n...<|im_end|>

source


# PromptingTools.CustomOpenAISchemaType.
julia
CustomOpenAISchema

CustomOpenAISchema() allows user to call any OpenAI-compatible API.

All the user needs to do is pass this schema as the first argument and provide the BASE URL of the API to call (api_kwargs.url).

Example

Assumes that we have a local server running at http://127.0.0.1:8081:

julia
api_key = "..."\nprompt = "Say hi!"\nmsg = aigenerate(CustomOpenAISchema(), prompt; model="my_model", api_key, api_kwargs=(; url="http://127.0.0.1:8081"))

source


# PromptingTools.DataMessageType.
julia
DataMessage

A message type for AI-generated data-based responses, ie, content other than text. Returned by the aiextract and aiembed functions.
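
For example, a hedged sketch with embeddings (assumes a configured API key; the exact shape of content depends on the call):

julia
msg = aiembed("Hello world")   # returns a DataMessage\nmsg.content                    # the embedding vector (or matrix for multiple inputs)\nmsg.tokens                     # token counts, if reported by the API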

Fields

source


# PromptingTools.DatabricksOpenAISchemaType.
julia
DatabricksOpenAISchema

DatabricksOpenAISchema() allows user to call Databricks Foundation Model API. API Reference

Requires two environment variables to be set:

source


# PromptingTools.DeepSeekOpenAISchemaType.
julia
DeepSeekOpenAISchema

Schema to call the DeepSeek API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.FireworksOpenAISchemaType.
julia
FireworksOpenAISchema

Schema to call the Fireworks.ai API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.GoogleSchemaType.

Calls Google's Gemini API. See more information here. It's available only for some regions.

source


# PromptingTools.GroqOpenAISchemaType.
julia
GroqOpenAISchema

Schema to call the groq.com API.
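
A hedged usage sketch (the model name and the GROQ_API_KEY environment variable are assumptions for illustration):

julia
# model name and environment variable are assumptions for illustration\nmsg = aigenerate(PromptingTools.GroqOpenAISchema(), "Say hi!";\n    model = "llama-3.1-8b-instant", api_key = get(ENV, "GROQ_API_KEY", ""))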

Links:

Requires one environment variable to be set:

source


# PromptingTools.ItemsExtractType.

Extract zero, one or more specified items from the provided data.
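
A hedged sketch with aiextract (the struct and the items field access are illustrative assumptions):

julia
# illustrative struct; requires a configured API key\nstruct CurrencyAmount\n    amount::Float64\n    currency::String\nend\nmsg = aiextract("I paid 10 USD and 5 EUR."; return_type = PromptingTools.ItemsExtract{CurrencyAmount})\nmsg.content.items   # vector of extracted items (field name assumed)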

source


# PromptingTools.LocalServerOpenAISchemaType.
julia
LocalServerOpenAISchema

Designed to be used with local servers. It's automatically called with model alias "local" (see MODEL_REGISTRY).

This schema is a flavor of CustomOpenAISchema with a url key preset by the global Preference key LOCAL_SERVER. See ?PREFERENCES for more details on how to change it. It assumes that the server follows OpenAI API conventions (eg, POST /v1/chat/completions).

Note: Llama.cpp (and hence Llama.jl built on top of it) do NOT support embeddings endpoint! You'll get an address error.

Example

Assumes that we have a local server running at http://127.0.0.1:10897/v1 (port and address used by Llama.jl, "v1" at the end is needed for OpenAI endpoint compatibility):

Three ways to call it:

julia
\n# Use @ai_str with "local" alias\nai"Say hi!"local\n\n# model="local"\naigenerate("Say hi!"; model="local")\n\n# Or set schema explicitly\nconst PT = PromptingTools\nmsg = aigenerate(PT.LocalServerOpenAISchema(), "Say hi!")

How to start an LLM local server? You can use the run_server function from Llama.jl. Use a separate Julia session.

julia
using Llama\nmodel = "...path..." # see Llama.jl README how to download one\nrun_server(; model)

To change the default port and address:

julia
# For a permanent change, set the preference:\nusing Preferences\nset_preferences!("LOCAL_SERVER"=>"http://127.0.0.1:10897/v1")\n\n# Or if it's a temporary fix, just change the variable `LOCAL_SERVER`:\nconst PT = PromptingTools\nPT.LOCAL_SERVER = "http://127.0.0.1:10897/v1"

source


# PromptingTools.MaybeExtractType.

Extract a result from the provided data, if any, otherwise set the error and message fields.
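
A hedged sketch (the struct is illustrative; error and message are the fields named above):

julia
# illustrative struct; requires a configured API key\nstruct PersonName\n    name::String\nend\nmsg = aiextract("There is no person mentioned in this sentence."; return_type = PromptingTools.MaybeExtract{PersonName})\nmsg.content.error     # true when nothing could be extracted\nmsg.content.message   # the model's explanation of why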

Arguments

source


# PromptingTools.MistralOpenAISchemaType.
julia
MistralOpenAISchema

MistralOpenAISchema() allows user to call MistralAI API known for mistral and mixtral models.

It's a flavor of CustomOpenAISchema() with a url preset to https://api.mistral.ai.

Most models have been registered, so you don't even have to specify the schema

Example

Let's call mistral-tiny model:

julia
api_key = "..." # can be set via ENV["MISTRAL_API_KEY"] or via our preference system\nmsg = aigenerate("Say hi!"; model="mistral_tiny", api_key)

See ?PREFERENCES for more details on how to set your API key permanently.

source


# PromptingTools.ModelSpecType.
julia
ModelSpec

A struct that contains information about a model, such as its name, schema, cost per token, etc.

Fields

Example

julia
spec = ModelSpec("gpt-3.5-turbo",\n    OpenAISchema(),\n    0.0015,\n    0.002,\n    "GPT-3.5 Turbo is a 175B parameter model and a common default on the OpenAI API.")\n\n# register it\nPromptingTools.register_model!(spec)

But you can also register any model directly via keyword arguments:

julia
PromptingTools.register_model!(\n    name = "gpt-3.5-turbo",\n    schema = OpenAISchema(),\n    cost_of_token_prompt = 0.0015,\n    cost_of_token_generation = 0.002,\n    description = "GPT-3.5 Turbo is a 175B parameter model and a common default on the OpenAI API.")

source


# PromptingTools.NoSchemaType.

Schema that keeps messages (<:AbstractMessage) and does not transform them for any specific model. It is used by the first pass of the prompt rendering system (see ?render).

source


# PromptingTools.OllamaManagedSchemaType.

Ollama by default manages different models and their associated prompt schemas when you pass system_prompt and prompt fields to the API.

Warning: It works only for 1 system message and 1 user message, so anything more than that has to be rejected.

If you need to pass more messages / a longer conversational history, you can define the model-specific schema directly and pass your Ollama requests with raw=true, which disables any templating and schema management by Ollama.

source


# PromptingTools.OllamaSchemaType.

OllamaSchema is the default schema for Ollama models.

It uses the following conversation template:

[Dict(role="system",content="..."),Dict(role="user",content="..."),Dict(role="assistant",content="...")]

It's very similar to OpenAISchema, but it appends images differently.

source


# PromptingTools.OpenAISchemaType.

OpenAISchema is the default schema for OpenAI models.

It uses the following conversation template:

[Dict(role="system",content="..."),Dict(role="user",content="..."),Dict(role="assistant",content="...")]

It's recommended to separate sections in your prompt with markdown headers (e.g. `##Answer\n\n`).

source


# PromptingTools.OpenRouterOpenAISchemaType.
julia
OpenRouterOpenAISchema

Schema to call the OpenRouter API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.SaverSchemaType.
julia
SaverSchema <: AbstractTracerSchema

SaverSchema is a schema that automatically saves the conversation to the disk. It's useful for debugging and for persistent logging.

It can be composed with any other schema, eg, TracerSchema to save additional metadata.

Set environment variable LOG_DIR to the directory where you want to save the conversation (see ?PREFERENCES). Conversations are named by the hash of the first message in the conversation to naturally group subsequent conversations together.

If you need to provide the logging directory or the file name dynamically, you can provide the following arguments to tracer_kwargs:

To use it automatically, re-register the models you use with the schema wrapped in SaverSchema

See also: meta, unwrap, TracerSchema, initialize_tracer, finalize_tracer

Example

julia
using PromptingTools: TracerSchema, OpenAISchema, SaverSchema\n# This schema will first trace the metadata (change to TraceMessage) and then save the conversation to the disk\n\nwrap_schema = OpenAISchema() |> TracerSchema |> SaverSchema\nconv = aigenerate(wrap_schema,:BlankSystemUser; system="You're a French-speaking assistant!",\n    user="Say hi!", model="gpt-4", api_kwargs=(;temperature=0.1), return_all=true)\n\n# conv is a vector of messages that will be saved to a JSON together with metadata about the template and api_kwargs

If you wanted to enable this automatically for models you use, you can do it like this:

julia
PT.register_model!(; name= "gpt-3.5-turbo", schema=OpenAISchema() |> TracerSchema |> SaverSchema)

Any subsequent calls with model="gpt-3.5-turbo" will automatically capture metadata and save the conversation to the disk.

To provide logging file path explicitly, use the tracer_kwargs:

julia
conv = aigenerate(wrap_schema,:BlankSystemUser; system="You're a French-speaking assistant!",\n    user="Say hi!", model="gpt-4", api_kwargs=(;temperature=0.1), return_all=true,\n    tracer_kwargs=(; log_file_path="my_logs/my_log.json"))

source


# PromptingTools.ShareGPTSchemaType.
julia
ShareGPTSchema <: AbstractShareGPTSchema

Frequently used schema for finetuning LLMs. Conversations are recorded as a vector of dicts with keys "from" and "value" (similar to OpenAI).

source


# PromptingTools.StreamCallbackType.
julia
StreamCallback

Simplest callback for streaming messages, which just prints the content to the output stream defined by out. When streaming is over, it builds the response body from the chunks and returns it as if it were a normal response from the API.

For more complex use cases, you can define your own callback. See the interface description below for more information.

Fields

Interface

streamed_request! composes of:

If you want to implement your own callback, you can create your own methods for the interface functions. Eg, if you want to print the streamed chunks into some specialized sink or Channel, you could define a simple method just for print_content.

Example

julia
using PromptingTools\nconst PT = PromptingTools\n\n# Simplest usage, just provide where to steam the text (we build the callback for you)\nmsg = aigenerate("Count from 1 to 100."; streamcallback = stdout)\n\nstreamcallback = PT.StreamCallback() # record all chunks\nmsg = aigenerate("Count from 1 to 100."; streamcallback)\n# this allows you to inspect each chunk with `streamcallback.chunks`\n\n# Get verbose output with details of each chunk for debugging\nstreamcallback = PT.StreamCallback(; verbose=true, throw_on_error=true)\nmsg = aigenerate("Count from 1 to 10."; streamcallback)

Note: If you provide a StreamCallback object to aigenerate, we will configure it and necessary api_kwargs via configure_callback! unless you specify the flavor field. If you provide a StreamCallback with a specific flavor, we leave all configuration to the user (eg, you need to provide the correct api_kwargs).

source


# PromptingTools.StreamChunkType.
julia
StreamChunk

A chunk of streaming data. A message is composed of multiple chunks.

Fields

source


# PromptingTools.TestEchoAnthropicSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoGoogleSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoOllamaManagedSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoOllamaSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoOpenAISchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TogetherOpenAISchemaType.
julia
TogetherOpenAISchema

Schema to call the Together.ai API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.ToolType.
julia
Tool

A tool that can be sent to an LLM for execution ("function calling").

Arguments

See also: AbstractTool, tool_call_signature

source


# PromptingTools.ToolMethod.
julia
Tool(callable::Union{Function, Type, Method}; kwargs...)

Create a Tool from a callable object (function, type, or method).

Arguments

Returns

Examples

julia
# Create a tool from a function\ntool = Tool(my_function)\n\n# Create a tool from a type\ntool = Tool(MyStruct)

source


# PromptingTools.TracerMessageType.
julia
TracerMessage{T <: Union{AbstractChatMessage, AbstractDataMessage}} <: AbstractTracerMessage

A mutable wrapper message designed for tracing the flow of messages through the system, allowing for iterative updates and providing additional metadata for observability.

Fields

This structure is particularly useful for debugging, monitoring, and auditing the flow of messages in systems that involve complex interactions or asynchronous processing.

All fields are optional besides the object.

Useful methods: pprint (pretty prints the underlying message), unwrap (to get the object out of tracer), align_tracer! (to set all shared IDs in a vector of tracers to the same), istracermessage to check if given message is an AbstractTracerMessage

Example

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())\nmsg = aigenerate(wrap_schema, "Say hi!"; model = "gpt4t")\nmsg # isa TracerMessage\nmsg.content # access content like if it was the message

source


# PromptingTools.TracerMessageLikeType.
julia
TracerMessageLike{T <: Any} <: AbstractTracer

A mutable structure designed for general-purpose tracing within the system, capable of handling any type of object that is part of the AI Conversation. It provides a flexible way to track and annotate objects as they move through different parts of the system, facilitating debugging, monitoring, and auditing.

Fields

This structure is particularly useful for systems that involve complex interactions or asynchronous processing, where tracking the flow and transformation of objects is crucial.

All fields are optional besides the object.

source


# PromptingTools.TracerSchemaType.
julia
TracerSchema <: AbstractTracerSchema

A schema designed to wrap another schema, enabling pre- and post-execution callbacks for tracing and additional functionalities. This type is specifically utilized within the TracerMessage type to trace the execution flow, facilitating observability and debugging in complex conversational AI systems.

The TracerSchema acts as a middleware, allowing developers to insert custom logic before and after the execution of the primary schema's functionality. This can include logging, performance measurement, or any other form of tracing required to understand or improve the execution flow.

TracerSchema automatically wraps messages in TracerMessage type, which has several important fields, eg,

See also: meta, unwrap, SaverSchema, initialize_tracer, finalize_tracer

Example

julia
wrap_schema = TracerSchema(OpenAISchema())\nmsg = aigenerate(wrap_schema, "Say hi!"; model="gpt-4")\n# output type should be TracerMessage\nmsg isa TracerMessage

You can define your own tracer schema and the corresponding methods: initialize_tracer, finalize_tracer. See src/llm_tracer.jl

source


# PromptingTools.UserMessageType.
julia
UserMessage

A message type for user-generated text-based responses. Consumed by ai* functions to generate responses.
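
For example, a minimal sketch of composing a conversation with a placeholder (the placeholder name is illustrative; requires a configured API key):

julia
using PromptingTools: SystemMessage, UserMessage\nconversation = [\n    SystemMessage("You are a helpful assistant."),\n    UserMessage("What is the capital of {{country}}?")]\nmsg = aigenerate(conversation; country = "Japan")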

Fields

source


# PromptingTools.UserMessageWithImagesType.
julia
UserMessageWithImages

A message type for user-generated text-based responses with images. Consumed by ai* functions to generate responses.

Fields

source


# PromptingTools.UserMessageWithImagesMethod.

Construct UserMessageWithImages with 1 or more images. Images can be either URLs or local paths.
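
A hedged construction sketch (the keyword name image_url is an assumption for illustration):

julia
# keyword name assumed for illustration\nmsg = UserMessageWithImages("Describe the attached image.";\n    image_url = ["https://example.com/picture.png"])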

source


# PromptingTools.X123Type.

With docstring

source


# OpenAI.create_chatMethod.
julia
OpenAI.create_chat(schema::CustomOpenAISchema,\n    api_key::AbstractString,\n    model::AbstractString,\n    conversation;\n    http_kwargs::NamedTuple = NamedTuple(),\n    streamcallback::Any = nothing,\n    url::String = "http://localhost:8080",\n    kwargs...)

Dispatch to the OpenAI.create_chat function for any OpenAI-compatible API.

It expects url keyword argument. Provide it to the aigenerate function via api_kwargs=(; url="my-url")

It will forward your query to the "chat/completions" endpoint of the base URL that you provided (=url).

source


# OpenAI.create_chatMethod.
julia
OpenAI.create_chat(schema::LocalServerOpenAISchema,\n    api_key::AbstractString,\n    model::AbstractString,\n    conversation;\n    url::String = "http://localhost:8080",\n    kwargs...)

Dispatch to the OpenAI.create_chat function, but with the LocalServer API parameters, ie, it defaults to the url specified by the LOCAL_SERVER preference. See ?PREFERENCES

source


# OpenAI.create_chatMethod.
julia
OpenAI.create_chat(schema::MistralOpenAISchema,\n    api_key::AbstractString, model::AbstractString, conversation;\n    url::String="https://api.mistral.ai/v1", kwargs...)

Dispatch to the OpenAI.create_chat function, but with the MistralAI API parameters.

It tries to access the MISTRALAI_API_KEY ENV variable, but you can also provide it via the api_key keyword argument.

source


# PromptingTools.aiclassifyMethod.
julia
aiclassify(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiclassify call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

source


', 87)), + _cache[48] || (_cache[48] = createStaticVNode('
# PromptingTools.AzureOpenAISchemaType.

AzureOpenAISchema

AzureOpenAISchema() allows user to call Azure OpenAI API. API Reference

Requires two environment variables to be set:

source


# PromptingTools.CerebrasOpenAISchemaType.
julia
CerebrasOpenAISchema

Schema to call the Cerebras API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.ChatMLSchemaType.

ChatMLSchema is used by many open-source chatbots, by OpenAI models (under the hood) and by several models and interfaces (eg, Ollama, vLLM)

You can explore it on tiktokenizer

It uses the following conversation structure:

<im_start>system\n...<im_end>\n<|im_start|>user\n...<|im_end|>\n<|im_start|>assistant\n...<|im_end|>

source


# PromptingTools.CustomOpenAISchemaType.
julia
CustomOpenAISchema

CustomOpenAISchema() allows user to call any OpenAI-compatible API.

All the user needs to do is pass this schema as the first argument and provide the BASE URL of the API to call (api_kwargs.url).

Example

Assumes that we have a local server running at http://127.0.0.1:8081:

julia
api_key = "..."\nprompt = "Say hi!"\nmsg = aigenerate(CustomOpenAISchema(), prompt; model="my_model", api_key, api_kwargs=(; url="http://127.0.0.1:8081"))

source


# PromptingTools.DataMessageType.
julia
DataMessage

A message type for AI-generated data-based responses, ie, content other than text. Returned by the aiextract and aiembed functions.

Fields
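
Example

A minimal sketch: embedding calls wrap their numeric output in a DataMessage.

julia
using PromptingTools\nconst PT = PromptingTools\n\nmsg = aiembed("Hello World")\nmsg isa PT.DataMessage # true\nmsg.content # the embedding vector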

source


# PromptingTools.DatabricksOpenAISchemaType.
julia
DatabricksOpenAISchema

DatabricksOpenAISchema() allows the user to call the Databricks Foundation Model API. API Reference

Requires two environment variables to be set:

source


# PromptingTools.DeepSeekOpenAISchemaType.
julia
DeepSeekOpenAISchema

Schema to call the DeepSeek API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.FireworksOpenAISchemaType.
julia
FireworksOpenAISchema

Schema to call the Fireworks.ai API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.GoogleSchemaType.

Calls Google's Gemini API. See more information here. It's available only for some regions.

source


# PromptingTools.GroqOpenAISchemaType.
julia
GroqOpenAISchema

Schema to call the groq.com API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.ItemsExtractType.

Extract zero, one or more specified items from the provided data.

source


# PromptingTools.LocalServerOpenAISchemaType.
julia
LocalServerOpenAISchema

Designed to be used with local servers. It's automatically called with model alias "local" (see MODEL_REGISTRY).

This schema is a flavor of CustomOpenAISchema with a url key preset by the global Preference key LOCAL_SERVER. See ?PREFERENCES for more details on how to change it. It assumes that the server follows OpenAI API conventions (eg, POST /v1/chat/completions).

Note: Llama.cpp (and hence Llama.jl built on top of it) does NOT support the embeddings endpoint! You'll get an address error.

Example

Assumes that we have a local server running at http://127.0.0.1:10897/v1 (port and address used by Llama.jl, "v1" at the end is needed for OpenAI endpoint compatibility):

Three ways to call it:

julia
\n# Use @ai_str with "local" alias\nai"Say hi!"local\n\n# model="local"\naigenerate("Say hi!"; model="local")\n\n# Or set schema explicitly\nconst PT = PromptingTools\nmsg = aigenerate(PT.LocalServerOpenAISchema(), "Say hi!")

How to start a LLM local server? You can use run_server function from Llama.jl. Use a separate Julia session.

julia
using Llama\nmodel = "...path..." # see Llama.jl README how to download one\nrun_server(; model)

To change the default port and address:

julia
# For a permanent change, set the preference:\nusing Preferences\nset_preferences!("LOCAL_SERVER"=>"http://127.0.0.1:10897/v1")\n\n# Or if it's a temporary fix, just change the variable `LOCAL_SERVER`:\nconst PT = PromptingTools\nPT.LOCAL_SERVER = "http://127.0.0.1:10897/v1"

source


# PromptingTools.MaybeExtractType.

Extract a result from the provided data, if any, otherwise set the error and message fields.

Arguments

source


# PromptingTools.MistralOpenAISchemaType.
julia
MistralOpenAISchema

MistralOpenAISchema() allows the user to call the MistralAI API, known for its mistral and mixtral models.

It's a flavor of CustomOpenAISchema() with a url preset to https://api.mistral.ai.

Most models have been registered, so you don't even have to specify the schema.

Example

Let's call mistral-tiny model:

julia
api_key = "..." # can be set via ENV["MISTRAL_API_KEY"] or via our preference system\nmsg = aigenerate("Say hi!"; model="mistral_tiny", api_key)

See ?PREFERENCES for more details on how to set your API key permanently.

source


# PromptingTools.ModelSpecType.
julia
ModelSpec

A struct that contains information about a model, such as its name, schema, cost per token, etc.

Fields

Example

julia
spec = ModelSpec("gpt-3.5-turbo",\n    OpenAISchema(),\n    0.0015,\n    0.002,\n    "GPT-3.5 Turbo is a 175B parameter model and a common default on the OpenAI API.")\n\n# register it\nPromptingTools.register_model!(spec)

But you can also register any model directly via keyword arguments:

julia
PromptingTools.register_model!(\n    name = "gpt-3.5-turbo",\n    schema = OpenAISchema(),\n    cost_of_token_prompt = 0.0015,\n    cost_of_token_generation = 0.002,\n    description = "GPT-3.5 Turbo is a 175B parameter model and a common default on the OpenAI API.")

source


# PromptingTools.NoSchemaType.

Schema that keeps messages (<:AbstractMessage) and does not transform for any specific model. It is used by the first pass of the prompt rendering system (see ?render).

source


# PromptingTools.OllamaManagedSchemaType.

Ollama by default manages different models and their associated prompt schemas when you pass system_prompt and prompt fields to the API.

Warning: It works only for 1 system message and 1 user message, so anything more than that has to be rejected.

If you need to pass more messages / a longer conversational history, you can define the model-specific schema directly and pass your Ollama requests with raw=true, which disables any templating and schema management by Ollama.

source


# PromptingTools.OllamaSchemaType.

OllamaSchema is the default schema for Ollama models.

It uses the following conversation template:

[Dict(role="system",content="..."),Dict(role="user",content="..."),Dict(role="assistant",content="...")]

It's very similar to OpenAISchema, but it appends images differently.
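
Example

A minimal sketch of using it explicitly (assumes a locally running Ollama server; the model name "llama3" is an assumption, use whichever model you have pulled):

julia
using PromptingTools\nconst PT = PromptingTools\n\n# Assumes Ollama is running locally and the "llama3" model has been pulled\nmsg = aigenerate(PT.OllamaSchema(), "Say hi!"; model = "llama3")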

source


# PromptingTools.OpenAISchemaType.

OpenAISchema is the default schema for OpenAI models.

It uses the following conversation template:

[Dict(role="system",content="..."),Dict(role="user",content="..."),Dict(role="assistant",content="...")]

It's recommended to separate sections in your prompt with markdown headers (e.g. `## Answer`).

source


# PromptingTools.OpenRouterOpenAISchemaType.
julia
OpenRouterOpenAISchema

Schema to call the OpenRouter API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.SaverSchemaType.
julia
SaverSchema <: AbstractTracerSchema

SaverSchema is a schema that automatically saves the conversation to the disk. It's useful for debugging and for persistent logging.

It can be composed with any other schema, eg, TracerSchema to save additional metadata.

Set environment variable LOG_DIR to the directory where you want to save the conversation (see ?PREFERENCES). Conversations are named by the hash of the first message in the conversation to naturally group subsequent conversations together.

If you need to provide the logging directory or the file name dynamically, you can provide the following arguments to tracer_kwargs:

To use it automatically, re-register the models you use with the schema wrapped in SaverSchema

See also: meta, unwrap, TracerSchema, initialize_tracer, finalize_tracer

Example

julia
using PromptingTools: TracerSchema, OpenAISchema, SaverSchema\n# This schema will first trace the metadata (change to TraceMessage) and then save the conversation to the disk\n\nwrap_schema = OpenAISchema() |> TracerSchema |> SaverSchema\nconv = aigenerate(wrap_schema,:BlankSystemUser; system="You're a French-speaking assistant!",\n    user="Say hi!", model="gpt-4", api_kwargs=(;temperature=0.1), return_all=true)\n\n# conv is a vector of messages that will be saved to a JSON together with metadata about the template and api_kwargs

If you wanted to enable this automatically for models you use, you can do it like this:

julia
PT.register_model!(; name= "gpt-3.5-turbo", schema=OpenAISchema() |> TracerSchema |> SaverSchema)

Any subsequent calls with model="gpt-3.5-turbo" will automatically capture metadata and save the conversation to the disk.

To provide the logging file path explicitly, use tracer_kwargs:

julia
conv = aigenerate(wrap_schema,:BlankSystemUser; system="You're a French-speaking assistant!",\n    user="Say hi!", model="gpt-4", api_kwargs=(;temperature=0.1), return_all=true,\n    tracer_kwargs=(; log_file_path="my_logs/my_log.json"))

source


# PromptingTools.ShareGPTSchemaType.
julia
ShareGPTSchema <: AbstractShareGPTSchema

Frequently used schema for finetuning LLMs. Conversations are recorded as a vector of dicts with keys from and value (similar to OpenAI).
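
Illustratively, a single record then looks roughly like this (a sketch of the ShareGPT convention; the exact roles and file layout may differ):

julia
# Sketch only: keys "from" and "value" per turn, as in the ShareGPT convention\nrecord = Dict("conversations" => [\n    Dict("from" => "system", "value" => "You are a helpful assistant."),\n    Dict("from" => "human", "value" => "Say hi!"),\n    Dict("from" => "gpt", "value" => "Hi there!")])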

source


# PromptingTools.StreamCallbackType.
julia
StreamCallback

Simplest callback for streaming messages; it just prints the content to the output stream defined by out. When streaming is over, it builds the response body from the chunks and returns it as if it was a normal response from the API.

For more complex use cases, you can define your own callback. See the interface description below for more information.

Fields

Interface

streamed_request! consists of:

If you want to implement your own callback, you can create your own methods for the interface functions. Eg, if you want to print the streamed chunks into some specialized sink or Channel, you could define a simple method just for print_content.

Example

julia
using PromptingTools\nconst PT = PromptingTools\n\n# Simplest usage, just provide where to stream the text (we build the callback for you)\nmsg = aigenerate("Count from 1 to 100."; streamcallback = stdout)\n\nstreamcallback = PT.StreamCallback() # record all chunks\nmsg = aigenerate("Count from 1 to 100."; streamcallback)\n# this allows you to inspect each chunk with `streamcallback.chunks`\n\n# Get verbose output with details of each chunk for debugging\nstreamcallback = PT.StreamCallback(; verbose=true, throw_on_error=true)\nmsg = aigenerate("Count from 1 to 10."; streamcallback)

Note: If you provide a StreamCallback object to aigenerate, we will configure it and necessary api_kwargs via configure_callback! unless you specify the flavor field. If you provide a StreamCallback with a specific flavor, we leave all configuration to the user (eg, you need to provide the correct api_kwargs).

source


# PromptingTools.StreamChunkType.
julia
StreamChunk

A chunk of streaming data. A message is composed of multiple chunks.

Fields

source


# PromptingTools.TestEchoAnthropicSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoGoogleSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoOllamaManagedSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoOllamaSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoOpenAISchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TogetherOpenAISchemaType.
julia
TogetherOpenAISchema

Schema to call the Together.ai API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.ToolType.
julia
Tool

A tool that can be sent to an LLM for execution ("function calling").

Arguments

See also: AbstractTool, tool_call_signature

source


# PromptingTools.ToolMethod.
julia
Tool(callable::Union{Function, Type, Method}; kwargs...)

Create a Tool from a callable object (function, type, or method).

Arguments

Returns

Examples

julia
# Create a tool from a function\ntool = Tool(my_function)\n\n# Create a tool from a type\ntool = Tool(MyStruct)

source


# PromptingTools.TracerMessageType.
julia
TracerMessage{T <: Union{AbstractChatMessage, AbstractDataMessage}} <: AbstractTracerMessage

A mutable wrapper message designed for tracing the flow of messages through the system, allowing for iterative updates and providing additional metadata for observability.

Fields

This structure is particularly useful for debugging, monitoring, and auditing the flow of messages in systems that involve complex interactions or asynchronous processing.

All fields are optional besides the object.

Useful methods: pprint (pretty prints the underlying message), unwrap (to get the object out of tracer), align_tracer! (to set all shared IDs in a vector of tracers to the same), istracermessage to check if given message is an AbstractTracerMessage

Example

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())\nmsg = aigenerate(wrap_schema, "Say hi!"; model = "gpt4t")\nmsg # isa TracerMessage\nmsg.content # access content like if it was the message

source


# PromptingTools.TracerMessageLikeType.
julia
TracerMessageLike{T <: Any} <: AbstractTracer

A mutable structure designed for general-purpose tracing within the system, capable of handling any type of object that is part of the AI Conversation. It provides a flexible way to track and annotate objects as they move through different parts of the system, facilitating debugging, monitoring, and auditing.

Fields

This structure is particularly useful for systems that involve complex interactions or asynchronous processing, where tracking the flow and transformation of objects is crucial.

All fields are optional besides the object.

source


# PromptingTools.TracerSchemaType.
julia
TracerSchema <: AbstractTracerSchema

A schema designed to wrap another schema, enabling pre- and post-execution callbacks for tracing and additional functionalities. This type is specifically utilized within the TracerMessage type to trace the execution flow, facilitating observability and debugging in complex conversational AI systems.

The TracerSchema acts as a middleware, allowing developers to insert custom logic before and after the execution of the primary schema's functionality. This can include logging, performance measurement, or any other form of tracing required to understand or improve the execution flow.

TracerSchema automatically wraps messages in TracerMessage type, which has several important fields, eg,

See also: meta, unwrap, SaverSchema, initialize_tracer, finalize_tracer

Example

julia
wrap_schema = TracerSchema(OpenAISchema())\nmsg = aigenerate(wrap_schema, "Say hi!"; model="gpt-4")\n# output type should be TracerMessage\nmsg isa TracerMessage

You can define your own tracer schema and the corresponding methods: initialize_tracer, finalize_tracer. See src/llm_tracer.jl

source


# PromptingTools.UserMessageType.
julia
UserMessage

A message type for user-generated text-based responses. Consumed by ai* functions to generate responses.

Fields
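
Example

A minimal sketch: construct one directly and pass it as part of a conversation.

julia
using PromptingTools\nconst PT = PromptingTools\n\nconversation = [\n    PT.SystemMessage("You are a helpful assistant."),\n    PT.UserMessage("What is `1+1`?")]\nmsg = aigenerate(conversation)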

source


# PromptingTools.UserMessageWithImagesType.
julia
UserMessageWithImages

A message type for user-generated text-based responses with images. Consumed by ai* functions to generate responses.

Fields

source


# PromptingTools.UserMessageWithImagesMethod.

Construct UserMessageWithImages with 1 or more images. Images can be either URLs or local paths.
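
Example

A minimal sketch (the image_url keyword and the URL below are illustrative assumptions; a local file path via image_path should work analogously):

julia
using PromptingTools\nconst PT = PromptingTools\n\n# Hypothetical URL; pass image_path = ["path/to/file.png"] for local files instead\nmsg = PT.UserMessageWithImages("Describe the image"; image_url = ["https://example.com/cat.png"])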

source


# PromptingTools.X123Type.

With docstring

source


# OpenAI.create_chatMethod.
julia
OpenAI.create_chat(schema::CustomOpenAISchema,\n    api_key::AbstractString,\n    model::AbstractString,\n    conversation;\n    http_kwargs::NamedTuple = NamedTuple(),\n    streamcallback::Any = nothing,\n    url::String = "http://localhost:8080",\n    kwargs...)

Dispatch to the OpenAI.create_chat function, for any OpenAI-compatible API.

It expects the url keyword argument. Provide it to the aigenerate function via api_kwargs=(; url="my-url").

It will forward your query to the "chat/completions" endpoint of the base URL that you provided (=url).
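
Example

In practice you rarely call this directly; instead, pass the url through aigenerate (a minimal sketch with a hypothetical local endpoint):

julia
using PromptingTools\nconst PT = PromptingTools\n\nmsg = aigenerate(PT.CustomOpenAISchema(), "Say hi!";\n    model = "my_model", api_key = "...",\n    api_kwargs = (; url = "http://localhost:8080"))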

source


# OpenAI.create_chatMethod.
julia
OpenAI.create_chat(schema::LocalServerOpenAISchema,\n    api_key::AbstractString,\n    model::AbstractString,\n    conversation;\n    url::String = "http://localhost:8080",\n    kwargs...)

Dispatch to the OpenAI.create_chat function, but with the LocalServer API parameters, ie, defaults to the url specified by the LOCAL_SERVER preference. See ?PREFERENCES.

source


# OpenAI.create_chatMethod.
julia
OpenAI.create_chat(schema::MistralOpenAISchema,

api_key::AbstractString, model::AbstractString, conversation; url::String="https://api.mistral.ai/v1", kwargs...)

Dispatch to the OpenAI.create_chat function, but with the MistralAI API parameters.

It tries to access the MISTRALAI_API_KEY ENV variable, but you can also provide it via the api_key keyword argument.
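
Example

For example, via the high-level call (a minimal sketch; "mistral-tiny" is an illustrative model name):

julia
using PromptingTools\nconst PT = PromptingTools\n\n# api_key overrides the MISTRALAI_API_KEY environment variable\nmsg = aigenerate(PT.MistralOpenAISchema(), "Say hi!"; model = "mistral-tiny", api_key = "...")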

source


# PromptingTools.aiclassifyMethod.
julia
aiclassify(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiclassify call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:
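
Example

A minimal sketch, mirroring the other tracer-wrapped calls in this reference:

julia
using PromptingTools\nconst PT = PromptingTools\n\nwrap_schema = PT.TracerSchema(PT.OpenAISchema())\nmsg = aiclassify(wrap_schema, :JudgeIsItTrue; it = "Is two plus two four?", model = "gpt4t")\n# the result carries tracer metadata (see TracerMessage / unwrap)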

source


', 87)), createBaseVNode("div", _hoisted_2, [ _cache[10] || (_cache[10] = createStaticVNode('# PromptingTools.aiclassifyMethod.
julia
aiclassify(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;\n    choices::AbstractVector{T} = ["true", "false", "unknown"],\n    model::AbstractString = MODEL_CHAT,\n    api_kwargs::NamedTuple = NamedTuple(),\n    token_ids_map::Union{Nothing, Dict{<:AbstractString, <:Integer}} = nothing,\n    kwargs...) where {T <: Union{AbstractString, Tuple{<:AbstractString, <:AbstractString}}}

Classifies the given prompt/statement into an arbitrary list of choices, provided either as plain choices (vector of strings) or as choices with descriptions (vector of tuples, ie, ("choice","description")).

It's a quick and easy option for "routing" and similar use cases, as it exploits the logit bias trick and outputs only 1 token. It classifies the input into an arbitrary list of categories (including with descriptions).

', 9)), createBaseVNode("p", null, [ @@ -44,9 +44,9 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { createBaseVNode("code", null, toDisplayString(_ctx.choices), 1), _cache[9] || (_cache[9] = createTextVNode(") that will be replaced with the encoded choices")) ]), - _cache[11] || (_cache[11] = createStaticVNode('

Choices are rewritten into an enumerated list and mapped to a few known OpenAI tokens (maximum of 40 choices supported). The mapping of token IDs for GPT3.5/4 is saved in the variable OPENAI_TOKEN_IDS.

It uses the logit bias trick and limits the output to 1 token to force the model to output only true/false/unknown. Credit for the idea goes to AAAzzam.

Arguments

Example

Given a user input, pick one of the two provided categories:

julia
choices = ["animal", "plant"]\ninput = "Palm tree"\naiclassify(:InputClassifier; choices, input)

Choices with descriptions provided as tuples:

julia
choices = [("A", "any animal or creature"), ("P", "any plant or tree"), ("O", "anything else")]\n\n# try the below inputs:\ninput = "spider" # -> returns "A" for any animal or creature\ninput = "daphodil" # -> returns "P" for any plant or tree\ninput = "castle" # -> returns "O" for everything else\naiclassify(:InputClassifier; choices, input)

You could also use this function for routing questions to different endpoints (notice the different template and placeholder used), eg,

julia
choices = [("A", "any question about animal or creature"), ("P", "any question about plant or tree"), ("O", "anything else")]\nquestion = "how many spiders are there?"\nmsg = aiclassify(:QuestionRouter; choices, question)\n# "A"

You can still use a simple true/false classification:

julia
aiclassify("Is two plus two four?") # true\naiclassify("Is two plus three a vegetable on Mars?") # false

aiclassify returns only true/false/unknown. It's easy to get the proper Bool output type out with tryparse, eg,

julia
tryparse(Bool, aiclassify("Is two plus two four?")) isa Bool # true

Output of type Nothing marks that the model couldn't classify the statement as true/false.

Ideally, we would like to re-use some helpful system prompt to get more accurate responses. For this reason we have templates, eg, :JudgeIsItTrue. By specifying the template, we can provide our statement as the expected variable (it in this case). See that the model now correctly classifies the statement as "unknown".

julia
aiclassify(:JudgeIsItTrue; it = "Is two plus three a vegetable on Mars?") # unknown

For better results, use higher quality models like gpt4, eg,

julia
aiclassify(:JudgeIsItTrue;\n    it = "If I had two apples and I got three more, I have five apples now.",\n    model = "gpt4") # true

source

', 21)) + _cache[11] || (_cache[11] = createStaticVNode('

Choices are rewritten into an enumerated list and mapped to a few known OpenAI tokens (maximum of 40 choices supported). The mapping of token IDs for GPT3.5/4 is saved in the variable OPENAI_TOKEN_IDS.

It uses the logit bias trick and limits the output to 1 token to force the model to output only true/false/unknown. Credit for the idea goes to AAAzzam.

Arguments

Example

Given a user input, pick one of the two provided categories:

julia
choices = ["animal", "plant"]\ninput = "Palm tree"\naiclassify(:InputClassifier; choices, input)

Choices with descriptions provided as tuples:

julia
choices = [("A", "any animal or creature"), ("P", "any plant or tree"), ("O", "anything else")]\n\n# try the below inputs:\ninput = "spider" # -> returns "A" for any animal or creature\ninput = "daphodil" # -> returns "P" for any plant or tree\ninput = "castle" # -> returns "O" for everything else\naiclassify(:InputClassifier; choices, input)

You could also use this function for routing questions to different endpoints (notice the different template and placeholder used), eg,

julia
choices = [("A", "any question about animal or creature"), ("P", "any question about plant or tree"), ("O", "anything else")]\nquestion = "how many spiders are there?"\nmsg = aiclassify(:QuestionRouter; choices, question)\n# "A"

You can still use a simple true/false classification:

julia
aiclassify("Is two plus two four?") # true\naiclassify("Is two plus three a vegetable on Mars?") # false

aiclassify returns only true/false/unknown. It's easy to get the proper Bool output type out with tryparse, eg,

julia
tryparse(Bool, aiclassify("Is two plus two four?")) isa Bool # true

Output of type Nothing marks that the model couldn't classify the statement as true/false.

Ideally, we would like to re-use some helpful system prompt to get more accurate responses. For this reason we have templates, eg, :JudgeIsItTrue. By specifying the template, we can provide our statement as the expected variable (it in this case). See that the model now correctly classifies the statement as "unknown".

julia
aiclassify(:JudgeIsItTrue; it = "Is two plus three a vegetable on Mars?") # unknown

For better results, use higher quality models like gpt4, eg,

julia
aiclassify(:JudgeIsItTrue;\n    it = "If I had two apples and I got three more, I have five apples now.",\n    model = "gpt4") # true

source

', 21)) ]), - _cache[49] || (_cache[49] = createStaticVNode('
# PromptingTools.aiembedFunction.
julia
aiembed(tracer_schema::AbstractTracerSchema,\n    doc_or_docs::Union{AbstractString, AbstractVector{<:AbstractString}}, postprocess::Function = identity;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiembed call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:
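
Example

A minimal sketch, analogous to the other tracer-wrapped calls:

julia
using PromptingTools\nconst PT = PromptingTools\n\nwrap_schema = PT.TracerSchema(PT.OpenAISchema())\nmsg = aiembed(wrap_schema, "Hello World")\n# the embedding is in the wrapped DataMessage; tracer metadata is attached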

source


# PromptingTools.aiembedMethod.
julia
aiembed(prompt_schema::AbstractOllamaManagedSchema,\n        doc_or_docs::Union{AbstractString, AbstractVector{<:AbstractString}},\n        postprocess::F = identity;\n        verbose::Bool = true,\n        api_key::String = "",\n        model::String = MODEL_EMBEDDING,\n        http_kwargs::NamedTuple = (retry_non_idempotent = true,\n                                   retries = 5,\n                                   readtimeout = 120),\n        api_kwargs::NamedTuple = NamedTuple(),\n        kwargs...) where {F <: Function}

The aiembed function generates embeddings for the given input using a specified model and returns a message object containing the embeddings, status, token count, and elapsed time.

Arguments

Returns

Note: Ollama API currently does not return the token count, so it's set to (0,0)

Example

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\n\nmsg = aiembed(schema, "Hello World"; model="openhermes2.5-mistral")\nmsg.content # 4096-element JSON3.Array{Float64...

We can embed multiple strings at once and they will be hcat into a matrix (ie, each column corresponds to one string)

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\n\nmsg = aiembed(schema, ["Hello World", "How are you?"]; model="openhermes2.5-mistral")\nmsg.content # 4096×2 Matrix{Float64}:

If you plan to calculate the cosine distance between embeddings, you can normalize them first:

julia
const PT = PromptingTools\nusing LinearAlgebra\nschema = PT.OllamaManagedSchema()\n\nmsg = aiembed(schema, ["embed me", "and me too"], LinearAlgebra.normalize; model="openhermes2.5-mistral")\n\n# calculate cosine distance between the two normalized embeddings as a simple dot product\nmsg.content' * msg.content[:, 1] # [1.0, 0.34]

Similarly, you can use the postprocess argument to materialize the data from JSON3.Object by using postprocess = copy

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\n\nmsg = aiembed(schema, "Hello World", copy; model="openhermes2.5-mistral")\nmsg.content # 4096-element Vector{Float64}

source


# PromptingTools.aiembedMethod.
julia
aiembed(prompt_schema::AbstractOpenAISchema,\n        doc_or_docs::Union{AbstractString, AbstractVector{<:AbstractString}},\n        postprocess::F = identity;\n        verbose::Bool = true,\n        api_key::String = OPENAI_API_KEY,\n        model::String = MODEL_EMBEDDING, \n        http_kwargs::NamedTuple = (retry_non_idempotent = true,\n                                   retries = 5,\n                                   readtimeout = 120),\n        api_kwargs::NamedTuple = NamedTuple(),\n        kwargs...) where {F <: Function}

The aiembed function generates embeddings for the given input using a specified model and returns a message object containing the embeddings, status, token count, and elapsed time.

Arguments

Returns

Example

julia
msg = aiembed("Hello World")\nmsg.content # 1536-element JSON3.Array{Float64...

We can embed multiple strings at once and they will be hcat into a matrix (ie, each column corresponds to one string)

julia
msg = aiembed(["Hello World", "How are you?"])\nmsg.content # 1536×2 Matrix{Float64}:

If you plan to calculate the cosine distance between embeddings, you can normalize them first:

julia
using LinearAlgebra\nmsg = aiembed(["embed me", "and me too"], LinearAlgebra.normalize)\n\n# calculate cosine distance between the two normalized embeddings as a simple dot product\nmsg.content' * msg.content[:, 1] # [1.0, 0.787]

source


# PromptingTools.aiextractMethod.
julia
aiextract(prompt_schema::AbstractAnthropicSchema, prompt::ALLOWED_PROMPT_TYPE;\n    return_type::Union{Type, AbstractTool, Vector},\n    verbose::Bool = true,\n    api_key::String = ANTHROPIC_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = NamedTuple(),\n    cache::Union{Nothing, Symbol} = nothing,\n    kwargs...)

Extract required information (defined by a struct return_type) from the provided prompt by leveraging Anthropic's function calling mode.

This is a perfect solution for extracting structured information from text (eg, extract organization names in news articles, etc.).

Read best practices here.

It's effectively a light wrapper around the aigenerate call, which requires the additional keyword argument return_type to be provided and enforces that the model outputs adhere to it.

Arguments

Note: At the moment, the cache is only allowed for prompt segments over 1024 tokens (in some cases, over 2048 tokens). You'll get an error if you try to cache short prompts.

Returns

If return_all=false (default):

If return_all=true:

See also: tool_call_signature, MaybeExtract, ItemsExtract, aigenerate

Example

Do you want to extract some specific measurements from a text like age, weight and height? You need to define the information you need as a struct (return_type):

"Person's age, height, and weight."\nstruct MyMeasurement\n    age::Int # required\n    height::Union{Int,Nothing} # optional\n    weight::Union{Nothing,Float64} # optional\nend\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall."; model="claudeh", return_type=MyMeasurement)\n# PromptingTools.DataMessage(MyMeasurement)\nmsg.content\n# MyMeasurement(30, 180, 80.0)

The fields that allow Nothing are marked as optional in the schema:

msg = aiextract("James is 30."; model="claudeh", return_type=MyMeasurement)\n# MyMeasurement(30, nothing, nothing)

If there are multiple items you want to extract, define a wrapper struct to get a Vector of MyMeasurement:

struct ManyMeasurements\n    measurements::Vector{MyMeasurement}\nend\n\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall. Then Jack is 19 but really tall - over 190!"; model="claudeh", return_type=ManyMeasurements)\n\nmsg.content.measurements\n# 2-element Vector{MyMeasurement}:\n#  MyMeasurement(30, 180, 80.0)\n#  MyMeasurement(19, 190, nothing)

Or you can use the convenience wrapper ItemsExtract to extract multiple measurements (zero, one or more):

julia
using PromptingTools: ItemsExtract\n\nreturn_type = ItemsExtract{MyMeasurement}\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall. Then Jack is 19 but really tall - over 190!"; model="claudeh", return_type)\n\nmsg.content.items # see the extracted items

Or if you want your extraction to fail gracefully when data isn't found, use MaybeExtract{T} wrapper (this trick is inspired by the Instructor package!):

using PromptingTools: MaybeExtract\n\nreturn_type = MaybeExtract{MyMeasurement}\n# Effectively the same as:\n# struct MaybeExtract{T}\n#     result::Union{T, Nothing} // The result of the extraction\n#     error::Bool // true if a result is found, false otherwise\n#     message::Union{Nothing, String} // Only present if no result is found, should be short and concise\n# end\n\n# If LLM extraction fails, it will return a Dict with `error` and `message` fields instead of the result!\nmsg = aiextract("Extract measurements from the text: I am giraffe"; model="claudeo", return_type)\nmsg.content\n# Output: MaybeExtract{MyMeasurement}(nothing, true, "I'm sorry, but your input of "I am giraffe" does not contain any information about a person's age, height or weight measurements that I can extract. To use this tool, please provide a statement that includes at least the person's age, and optionally their height in inches and weight in pounds. Without that information, I am unable to extract the requested measurements.")

That way, you can handle the error gracefully and get a reason why extraction failed (in msg.content.message).

However, this can fail with weaker models like claudeh, so we can apply some of our prompt templates with an embedded reasoning step:

julia
msg = aiextract(:ExtractDataCoTXML; data="I am giraffe", model="claudeh", return_type)\nmsg.content\n# Output: MaybeExtract{MyMeasurement}(nothing, true, "The provided data does not contain the expected information about a person's age, height, and weight.")

Note that when using a prompt template, we provide data for the extraction as the corresponding placeholder (see aitemplates("extract") for documentation of this template).

Note that the error message refers to a giraffe not being a human, because in our MyMeasurement docstring, we said that it's for people!

Example of using a vector of field names with aiextract

julia
fields = [:location, :temperature => Float64, :condition => String]\nmsg = aiextract("Extract the following information from the text: location, temperature, condition. Text: The weather in New York is sunny and 72.5 degrees Fahrenheit."; \nreturn_type = fields, model="claudeh")

Or simply call aiextract("some text"; return_type = [:reasoning,:answer], model="claudeh") to get a Chain of Thought reasoning for extraction task.

It will be returned in a newly generated type, which you can check with PromptingTools.isextracted(msg.content) == true to confirm the data has been extracted correctly.

This new syntax also allows you to provide field-level descriptions, which will be passed to the model.

julia
fields_with_descriptions = [\n    :location,\n    :temperature => Float64,\n    :temperature__description => "Temperature in degrees Fahrenheit",\n    :condition => String,\n    :condition__description => "Current weather condition (e.g., sunny, rainy, cloudy)"\n]\nmsg = aiextract("The weather in New York is sunny and 72.5 degrees Fahrenheit."; return_type = fields_with_descriptions, model="claudeh")

source


# PromptingTools.aiextractMethod.
julia
aiextract(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;\n    return_type::Union{Type, AbstractTool, Vector},\n    verbose::Bool = true,\n    api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = (;\n        tool_choice = nothing),\n    strict::Union{Nothing, Bool} = nothing,\n    kwargs...)

Extract required information (defined by a struct return_type) from the provided prompt by leveraging OpenAI function calling mode.

This is a perfect solution for extracting structured information from text (eg, extract organization names in news articles, etc.)

It's effectively a light wrapper around the aigenerate call, which requires the additional keyword argument return_type to be provided and enforces that the model outputs adhere to it.

Arguments

Returns

If return_all=false (default):

If return_all=true:

Note: msg.content can be a single object (if a single tool is used) or a vector of objects (if multiple tools are used)!

See also: tool_call_signature, MaybeExtract, ItemsExtract, aigenerate, generate_struct

Example

Do you want to extract some specific measurements from a text like age, weight and height? You need to define the information you need as a struct (return_type):

"Person's age, height, and weight."\nstruct MyMeasurement\n    age::Int # required\n    height::Union{Int,Nothing} # optional\n    weight::Union{Nothing,Float64} # optional\nend\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall."; return_type=MyMeasurement)\n# PromptingTools.DataMessage(MyMeasurement)\nmsg.content\n# MyMeasurement(30, 180, 80.0)

The fields that allow Nothing are marked as optional in the schema:

msg = aiextract("James is 30."; return_type=MyMeasurement)\n# MyMeasurement(30, nothing, nothing)

If there are multiple items you want to extract, define a wrapper struct to get a Vector of MyMeasurement:

struct ManyMeasurements\n    measurements::Vector{MyMeasurement}\nend\n\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall. Then Jack is 19 but really tall - over 190!"; return_type=ManyMeasurements)\n\nmsg.content.measurements\n# 2-element Vector{MyMeasurement}:\n#  MyMeasurement(30, 180, 80.0)\n#  MyMeasurement(19, 190, nothing)

Or you can use the convenience wrapper ItemsExtract to extract multiple measurements (zero, one or more):

julia
using PromptingTools: ItemsExtract\n\nreturn_type = ItemsExtract{MyMeasurement}\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall. Then Jack is 19 but really tall - over 190!"; return_type)\n\nmsg.content.items # see the extracted items

Or if you want your extraction to fail gracefully when data isn't found, use MaybeExtract{T} wrapper (this trick is inspired by the Instructor package!):

using PromptingTools: MaybeExtract\n\nreturn_type = MaybeExtract{MyMeasurement}\n# Effectively the same as:\n# struct MaybeExtract{T}\n#     result::Union{T, Nothing} // The result of the extraction\n#     error::Bool // true if a result is found, false otherwise\n#     message::Union{Nothing, String} // Only present if no result is found, should be short and concise\n# end\n\n# If LLM extraction fails, it will return a Dict with `error` and `message` fields instead of the result!\nmsg = aiextract("Extract measurements from the text: I am giraffe"; return_type)\nmsg.content\n# MaybeExtract{MyMeasurement}(nothing, true, "I'm sorry, but I can only assist with human measurements.")

That way, you can handle the error gracefully and get a reason why extraction failed (in msg.content.message).

Note that the error message refers to a giraffe not being a human, because in our MyMeasurement docstring, we said that it's for people!

Some non-OpenAI providers require a different specification of the "tool choice" than OpenAI. For example, to use Mistral models ("mistrall" for mistral large), do:

julia
"Some fruit"\nstruct Fruit\n    name::String\nend\naiextract("I ate an apple",return_type=Fruit,api_kwargs=(;tool_choice="any"),model="mistrall")\n# Notice two differences: 1) struct MUST have a docstring, 2) tool_choice is set explicitly set to "any"

Example of using a vector of field names with aiextract

julia
fields = [:location, :temperature => Float64, :condition => String]\nmsg = aiextract("Extract the following information from the text: location, temperature, condition. Text: The weather in New York is sunny and 72.5 degrees Fahrenheit."; return_type = fields)

Or simply call aiextract("some text"; return_type = [:reasoning,:answer]) to get a Chain of Thought reasoning for extraction task.

It will be returned in a newly generated type, which you can check with PromptingTools.isextracted(msg.content) == true to confirm the data has been extracted correctly.

This new syntax also allows you to provide field-level descriptions, which will be passed to the model.

julia
fields_with_descriptions = [\n    :location,\n    :temperature => Float64,\n    :temperature__description => "Temperature in degrees Fahrenheit",\n    :condition => String,\n    :condition__description => "Current weather condition (e.g., sunny, rainy, cloudy)"\n]\nmsg = aiextract("The weather in New York is sunny and 72.5 degrees Fahrenheit."; return_type = fields_with_descriptions)

If you feel that the extraction is not smart/creative enough, you can use json_mode = true to enforce the JSON mode, which automatically enables the structured output mode (as opposed to function calling mode).

The JSON mode is useful for cases when you want to enforce a specific output format, such as JSON, and want the model to adhere to that format, but don't want to pretend it's a "function call". Expect a few seconds of delay on the first call for a specific struct, as the provider has to produce the constrained grammar first.

julia
msg = aiextract("Extract the following information from the text: location, temperature, condition. Text: The weather in New York is sunny and 72.5 degrees Fahrenheit."; \nreturn_type = fields_with_descriptions, json_mode = true)\n# PromptingTools.DataMessage(NamedTuple)\n\nmsg.content\n# (location = "New York", temperature = 72.5, condition = "sunny")

It works equally well for structs provided as return types:

julia
msg = aiextract("James is 30, weighs 80kg. He's 180cm tall."; return_type=MyMeasurement, json_mode=true)

source


# PromptingTools.aiextractMethod.
julia
aiextract(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiextract call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:
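
Example

A minimal sketch, reusing the MyMeasurement struct from the aiextract examples above:

julia
using PromptingTools\nconst PT = PromptingTools\n\n"Person's age, height, and weight."\nstruct MyMeasurement\n    age::Int\n    height::Union{Int, Nothing}\n    weight::Union{Nothing, Float64}\nend\n\nwrap_schema = PT.TracerSchema(PT.OpenAISchema())\nmsg = aiextract(wrap_schema, "James is 30."; return_type = MyMeasurement)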

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractAnthropicSchema, prompt::ALLOWED_PROMPT_TYPE; verbose::Bool = true,\n    api_key::String = ANTHROPIC_API_KEY, model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    streamcallback::Any = nothing,\n    no_system_message::Bool = false,\n    aiprefill::Union{Nothing, AbstractString} = nothing,\n    http_kwargs::NamedTuple = NamedTuple(), api_kwargs::NamedTuple = NamedTuple(),\n    cache::Union{Nothing, Symbol} = nothing,\n    kwargs...)

Generate an AI response based on a given prompt using the Anthropic API.

Arguments

Note: At the moment, the cache is only allowed for prompt segments over 1024 tokens (in some cases, over 2048 tokens). You'll get an error if you try to cache short prompts.

Returns

Use msg.content to access the extracted string.

See also: ai_str, aai_str

Example

Simple hello world to test the API:

julia
const PT = PromptingTools\nschema = PT.AnthropicSchema() # We need to explicit if we want Anthropic, otherwise OpenAISchema is the default\n\nmsg = aigenerate(schema, "Say hi!"; model="claudeh") #claudeh is the model alias for Claude 3 Haiku, fast and cheap model\n[ Info: Tokens: 21 @ Cost: $0.0 in 0.6 seconds\nAIMessage("Hello!")

msg is an AIMessage object. Access the generated string via content property:

julia
typeof(msg) # AIMessage{SubString{String}}\npropertynames(msg) # (:content, :status, :tokens, :elapsed, :cost, :log_prob, :finish_reason, :run_id, :sample_id, :_type)\nmsg.content # "Hello!

Note: We need to be explicit about the schema we want to use. If we don't, it will default to OpenAISchema (=PT.DEFAULT_SCHEMA). Alternatively, if you provide a known model name or alias (eg, claudeh for Claude 3 Haiku - see MODEL_REGISTRY), the schema will be inferred from the model name.

We will use the Claude 3 Haiku model for the following examples, so there is no need to specify the schema. See also "claudeo" and "claudes" for other Claude 3 models.

You can use string interpolation:

julia
const PT = PromptingTools\n\na = 1\nmsg=aigenerate("What is `$a+$a`?"; model="claudeh")\nmsg.content # "The answer to `1+1` is `2`."

___ You can provide the whole conversation or more intricate prompts as a Vector{AbstractMessage}. Claude models are good at completing conversations that ended with an AIMessage (they just continue where it left off):

julia
const PT = PromptingTools\n\nconversation = [\n    PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),\n    PT.UserMessage("I have feelings for my iPhone. What should I do?"),\n    PT.AIMessage("Hmm, strong the attachment is,")]\n\nmsg = aigenerate(conversation; model="claudeh")\nAIMessage("I sense. But unhealthy it may be. Your iPhone, a tool it is, not a living being. Feelings of affection, understandable they are, <continues>")

Example of streaming:

julia
# Simplest usage, just provide where to stream the text\nmsg = aigenerate("Count from 1 to 100."; streamcallback = stdout, model="claudeh")\n\nstreamcallback = PT.StreamCallback()\nmsg = aigenerate("Count from 1 to 100."; streamcallback, model="claudeh")\n# this allows you to inspect each chunk with `streamcallback.chunks`. You can then empty it with `empty!(streamcallback)` in between repeated calls.\n\n# Get verbose output with details of each chunk\nstreamcallback = PT.StreamCallback(; verbose=true, throw_on_error=true)\nmsg = aigenerate("Count from 1 to 10."; streamcallback, model="claudeh")

Note: Streaming support is only for Anthropic models and it doesn't yet support tool calling and a few other features (logprobs, refusals, etc.)

You can also provide a prefill for the AI response to steer the response in a certain direction (eg, formatting, style):

julia
msg = aigenerate("Sum up 1 to 100."; aiprefill = "I'd be happy to answer in one number without any additional text. The answer is:", model="claudeh")

Note: It MUST NOT end with a trailing space. You'll get an API error if you do.

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractGoogleSchema, prompt::ALLOWED_PROMPT_TYPE;\n    verbose::Bool = true,\n    api_key::String = GOOGLE_API_KEY,\n    model::String = "gemini-pro", return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Generate an AI response based on a given prompt using the Google Gemini API. Get the API key here.

Note:

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aiembed, aiclassify, aiextract, aiscan, aitemplates

Example

Simple hello world to test the API:

julia
result = aigenerate("Say Hi!"; model="gemini-pro")\n# AIMessage("Hi there! 👋 I'm here to help you with any questions or tasks you may have. Just let me know what you need, and I'll do my best to assist you.")

result is an AIMessage object. Access the generated string via content property:

julia
typeof(result) # AIMessage{SubString{String}}\npropertynames(result) # (:content, :status, :tokens, :elapsed\nresult.content # "Hi there! ...

___ You can use string interpolation and alias "gemini":

julia
a = 1\nmsg=aigenerate("What is `$a+$a`?"; model="gemini")\nmsg.content # "1+1 is 2."

___ You can provide the whole conversation or more intricate prompts as a Vector{AbstractMessage}:

julia
const PT = PromptingTools\n\nconversation = [\n    PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),\n    PT.UserMessage("I have feelings for my iPhone. What should I do?")]\nmsg=aigenerate(conversation; model="gemini")\n# AIMessage("Young Padawan, you have stumbled into a dangerous path.... <continues>")

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractOllamaManagedSchema, prompt::ALLOWED_PROMPT_TYPE; verbose::Bool = true,\n    api_key::String = "", model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = NamedTuple(), api_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Generate an AI response based on a given prompt using the Ollama API.

Arguments

Returns

Use msg.content to access the extracted string.

See also: ai_str, aai_str, aiembed

Example

Simple hello world to test the API:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema() # We need to explicit if we want Ollama, OpenAISchema is the default\n\nmsg = aigenerate(schema, "Say hi!"; model="openhermes2.5-mistral")\n# [ Info: Tokens: 69 in 0.9 seconds\n# AIMessage("Hello! How can I assist you today?")

msg is an AIMessage object. Access the generated string via content property:

julia
typeof(msg) # AIMessage{SubString{String}}\npropertynames(msg) # (:content, :status, :tokens, :elapsed\nmsg.content # "Hello! How can I assist you today?"

Note: We need to be explicit about the schema we want to use. If we don't, it will default to OpenAISchema (=PT.DEFAULT_SCHEMA). ___ You can use string interpolation:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\na = 1\nmsg=aigenerate(schema, "What is `$a+$a`?"; model="openhermes2.5-mistral")\nmsg.content # "The result of `1+1` is `2`."

___ You can provide the whole conversation or more intricate prompts as a Vector{AbstractMessage}:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\n\nconversation = [\n    PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),\n    PT.UserMessage("I have feelings for my iPhone. What should I do?")]\n\nmsg = aigenerate(schema, conversation; model="openhermes2.5-mistral")\n# [ Info: Tokens: 111 in 2.1 seconds\n# AIMessage("Strong the attachment is, it leads to suffering it may. Focus on the force within you must, ...<continues>")

Note: Managed Ollama currently supports at most 1 User Message and 1 System Message given the API limitations. If you want more, you need to use the ChatMLSchema.

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractOllamaManagedSchema, prompt::ALLOWED_PROMPT_TYPE; verbose::Bool = true,\n    api_key::String = "", model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = NamedTuple(), api_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Generate an AI response based on a given prompt using the Ollama API.

Arguments

Returns

Use msg.content to access the extracted string.

See also: ai_str, aai_str, aiembed

Example

Simple hello world to test the API:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema() # We need to explicit if we want Ollama, OpenAISchema is the default\n\nmsg = aigenerate(schema, "Say hi!"; model="openhermes2.5-mistral")\n# [ Info: Tokens: 69 in 0.9 seconds\n# AIMessage("Hello! How can I assist you today?")

msg is an AIMessage object. Access the generated string via content property:

julia
typeof(msg) # AIMessage{SubString{String}}\npropertynames(msg) # (:content, :status, :tokens, :elapsed\nmsg.content # "Hello! How can I assist you today?"

Note: We need to be explicit about the schema we want to use. If we don't, it will default to OpenAISchema (=PT.DEFAULT_SCHEMA). ___ You can use string interpolation:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\na = 1\nmsg=aigenerate(schema, "What is `$a+$a`?"; model="openhermes2.5-mistral")\nmsg.content # "The result of `1+1` is `2`."

___ You can provide the whole conversation or more intricate prompts as a Vector{AbstractMessage}:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\n\nconversation = [\n    PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),\n    PT.UserMessage("I have feelings for my iPhone. What should I do?")]\n\nmsg = aigenerate(schema, conversation; model="openhermes2.5-mistral")\n# [ Info: Tokens: 111 in 2.1 seconds\n# AIMessage("Strong the attachment is, it leads to suffering it may. Focus on the force within you must, ...<continues>")

Note: Managed Ollama currently supports at most 1 User Message and 1 System Message given the API limitations. If you want more, you need to use the ChatMLSchema.

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;\n    verbose::Bool = true,\n    api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT, return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    streamcallback::Any = nothing,\n    no_system_message::Bool = false,\n    name_user::Union{Nothing, String} = nothing,\n    name_assistant::Union{Nothing, String} = nothing,\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Generate an AI response based on a given prompt using the OpenAI API.

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aiembed, aiclassify, aiextract, aiscan, aitemplates

Example

Simple hello world to test the API:

julia
result = aigenerate("Say Hi!")\n# [ Info: Tokens: 29 @ Cost: $0.0 in 1.0 seconds\n# AIMessage("Hello! How can I assist you today?")

result is an AIMessage object. Access the generated string via content property:

julia
typeof(result) # AIMessage{SubString{String}}\npropertynames(result) # (:content, :status, :tokens, :elapsed\nresult.content # "Hello! How can I assist you today?"

___ You can use string interpolation:

julia
a = 1\nmsg=aigenerate("What is `$a+$a`?")\nmsg.content # "The sum of `1+1` is `2`."

___ You can provide the whole conversation or more intricate prompts as a Vector{AbstractMessage}:

julia
const PT = PromptingTools\n\nconversation = [\n    PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),\n    PT.UserMessage("I have feelings for my iPhone. What should I do?")]\nmsg=aigenerate(conversation)\n# AIMessage("Ah, strong feelings you have for your iPhone. A Jedi's path, this is not... <continues>")

Example of streaming:

julia
# Simplest usage, just provide where to stream the text\nmsg = aigenerate("Count from 1 to 100."; streamcallback = stdout)\n\nstreamcallback = PT.StreamCallback()\nmsg = aigenerate("Count from 1 to 100."; streamcallback)\n# this allows you to inspect each chunk with `streamcallback.chunks`. You can then empty it with `empty!(streamcallback)` in between repeated calls.\n\n# Get verbose output with details of each chunk\nstreamcallback = PT.StreamCallback(; verbose=true, throw_on_error=true)\nmsg = aigenerate("Count from 1 to 10."; streamcallback)

Learn more in ?StreamCallback. Note: Streaming support is only for OpenAI models and it doesn't yet support tool calling and a few other features (logprobs, refusals, etc.)

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", return_all::Bool = false, kwargs...)

Wraps the normal aigenerate call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

Example

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())\nmsg = aigenerate(wrap_schema, "Say hi!"; model = "gpt4t")\nmsg isa TracerMessage # true\nmsg.content # access content like if it was the message\nPT.pprint(msg) # pretty-print the message

It works on a vector of messages and converts only the non-tracer ones, eg,

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())\nconv = aigenerate(wrap_schema, "Say hi!"; model = "gpt4t", return_all = true)\nall(PT.istracermessage, conv) #true

source


# PromptingTools.aiimageMethod.
julia
aiimage(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;\n    image_size::AbstractString = "1024x1024",\n    image_quality::AbstractString = "standard",\n    image_n::Integer = 1,\n    verbose::Bool = true,\n    api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_IMAGE_GENERATION,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Generates an image from the provided prompt. If multiple "messages" are provided in prompt, it extracts the text ONLY from the last message!

Image (or the reference to it) will be returned in a DataMessage.content, the format will depend on the api_kwargs.response_format you set.

Can be used for generating images of varying quality and style with dall-e-* models. This function DOES NOT SUPPORT multi-turn conversations (ie, do not provide previous conversation via conversation argument).

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aigenerate, aiembed, aiclassify, aiextract, aiscan, aitemplates

Notes

Example

Generate an image:

julia
# You can experiment with `image_size`, `image_quality` kwargs!\nmsg = aiimage("A white cat on a car")\n\n# Download the image into a file\nusing Downloads\nDownloads.download(msg.content[:url], "cat_on_car.png")\n\n# You can also see the revised prompt that DALL-E 3 used\nmsg.content[:revised_prompt]\n# Output: "Visualize a pristine white cat gracefully perched atop a shiny car. \n# The cat's fur is stark white and its eyes bright with curiosity. \n# As for the car, it could be a contemporary sedan, glossy and in a vibrant color. \n# The scene could be set under the blue sky, enhancing the contrast between the white cat, the colorful car, and the bright blue sky."

Note that you MUST download any URL-based images within 60 minutes. The links will become inactive.

If you wanted to download image directly into the DataMessage, provide response_format="b64_json" in api_kwargs:

julia
msg = aiimage("A white cat on a car"; image_quality="hd", api_kwargs=(; response_format="b64_json"))\n\n# Then you need to use Base64 package to decode it and save it to a file:\nusing Base64\nwrite("cat_on_car_hd.png", base64decode(msg.content[:b64_json]));

source


# PromptingTools.aiimageMethod.
julia
aiimage(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiimage call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

source


# PromptingTools.aiscanMethod.
julia
aiscan([prompt_schema::AbstractOllamaSchema,] prompt::ALLOWED_PROMPT_TYPE; \nimage_url::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,\nimage_path::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,\nattach_to_latest::Bool = true,\nverbose::Bool = true, api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = (;\n        retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), \n    api_kwargs::NamedTuple = (; max_tokens = 2500),\n    kwargs...)

Scans the provided image (image_url or image_path) with the goal provided in the prompt.

Can be used for many multi-modal tasks, such as: OCR (transcribe text in the image), image captioning, image classification, etc.

It's effectively a light wrapper around the aigenerate call, which accepts the additional keyword arguments image_url, image_path, and image_detail. At least one image source (url or path) must be provided.

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aigenerate, aiembed, aiclassify, aiextract, aitemplates

Notes

Example

Describe the provided image:

julia
msg = aiscan("Describe the image"; image_path="julia.png", model="bakllava")\n# [ Info: Tokens: 1141 @ Cost: $0.0117 in 2.2 seconds\n# AIMessage("The image shows a logo consisting of the word "julia" written in lowercase")

You can provide multiple images at once as a vector and ask for "low" level of detail (cheaper):

julia
msg = aiscan("Describe the image"; image_path=["julia.png","python.png"] model="bakllava")

You can use this function as a nice and quick OCR (transcribe text in the image) with a template :OCRTask. Let's transcribe some SQL code from a screenshot (no more re-typing!):

julia
using Downloads\n# Screenshot of some SQL code -- we cannot use image_url directly, so we need to download it first\nimage_url = "https://www.sqlservercentral.com/wp-content/uploads/legacy/8755f69180b7ac7ee76a69ae68ec36872a116ad4/24622.png"\nimage_path = Downloads.download(image_url)\nmsg = aiscan(:OCRTask; image_path, model="bakllava", task="Transcribe the SQL code in the image.", api_kwargs=(; max_tokens=2500))\n\n# AIMessage("```sql\n# update Orders <continue>\n\n# You can add syntax highlighting of the outputs via Markdown\nusing Markdown\nmsg.content |> Markdown.parse

Local models cannot handle image URLs directly (image_url), so you need to download the image first and provide it as image_path:

julia
using Downloads\nimage_path = Downloads.download(image_url)

Notice that we set max_tokens = 2500. If your outputs seem truncated, it might be because the default maximum tokens on the server is set too low!

source


# PromptingTools.aiscanMethod.
julia
aiscan([prompt_schema::AbstractOpenAISchema,] prompt::ALLOWED_PROMPT_TYPE; \nimage_url::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,\nimage_path::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,\nimage_detail::AbstractString = "auto",\nattach_to_latest::Bool = true,\nverbose::Bool = true, api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = (;\n        retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), \n    api_kwargs::NamedTuple = (; max_tokens = 2500),\n    kwargs...)

Scans the provided image (image_url or image_path) with the goal provided in the prompt.

Can be used for many multi-modal tasks, such as: OCR (transcribe text in the image), image captioning, image classification, etc.

It's effectively a light wrapper around the aigenerate call, which accepts the additional keyword arguments image_url, image_path, and image_detail. At least one image source (url or path) must be provided.

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aigenerate, aiembed, aiclassify, aiextract, aitemplates

Notes

Example

Describe the provided image:

julia
msg = aiscan("Describe the image"; image_path="julia.png", model="gpt4v")\n# [ Info: Tokens: 1141 @ Cost: $0.0117 in 2.2 seconds\n# AIMessage("The image shows a logo consisting of the word "julia" written in lowercase")

You can provide multiple images at once as a vector and ask for "low" level of detail (cheaper):

julia
msg = aiscan("Describe the image"; image_path=["julia.png","python.png"], image_detail="low", model="gpt4v")

You can use this function as a nice and quick OCR (transcribe text in the image) with a template :OCRTask. Let's transcribe some SQL code from a screenshot (no more re-typing!):

julia
# Screenshot of some SQL code\nimage_url = "https://www.sqlservercentral.com/wp-content/uploads/legacy/8755f69180b7ac7ee76a69ae68ec36872a116ad4/24622.png"\nmsg = aiscan(:OCRTask; image_url, model="gpt4v", task="Transcribe the SQL code in the image.", api_kwargs=(; max_tokens=2500))\n\n# [ Info: Tokens: 362 @ Cost: $0.0045 in 2.5 seconds\n# AIMessage("```sql\n# update Orders <continue>\n\n# You can add syntax highlighting of the outputs via Markdown\nusing Markdown\nmsg.content |> Markdown.parse

Notice that we enforce max_tokens = 2500. That's because OpenAI seems to default to ~300 tokens, which provides incomplete outputs. Hence, we set this value to 2500 as a default. If you still get truncated outputs, increase this value.

source


# PromptingTools.aiscanMethod.
julia
aiscan(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiscan call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

source


# PromptingTools.aitemplatesFunction.
julia
aitemplates

Find easily the most suitable templates for your use case.

You can search by:

Keyword Arguments

Examples

Find available templates with aitemplates:

julia
tmps = aitemplates("JuliaExpertAsk")\n# Will surface one specific template\n# 1-element Vector{AITemplateMetadata}:\n# PromptingTools.AITemplateMetadata\n#   name: Symbol JuliaExpertAsk\n#   description: String "For asking questions about Julia language. Placeholders: `ask`"\n#   version: String "1"\n#   wordcount: Int64 237\n#   variables: Array{Symbol}((1,))\n#   system_preview: String "You are a world-class Julia language programmer with the knowledge of the latest syntax. Your commun"\n#   user_preview: String "# Question\n\n{{ask}}"\n#   source: String ""

The above gives you a good idea of what the template is about, what placeholders are available, and how much it would cost to use it (=wordcount).

Search for all Julia-related templates:

julia
tmps = aitemplates("Julia")\n# 2-element Vector{AITemplateMetadata}... -> more to come later!

If you are on VSCode, you can leverage nice tabular display with vscodedisplay:

julia
using DataFrames\ntmps = aitemplates("Julia") |> DataFrame |> vscodedisplay

I have my selected template, how do I use it? Just use the "name" in aigenerate or aiclassify like you see in the first example!
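
For instance, a minimal sketch (assuming the :JuliaExpertAsk template surfaced above and its ask placeholder):

julia
# Use the template name as a Symbol and fill its placeholder(s) via keyword arguments\nmsg = aigenerate(:JuliaExpertAsk; ask = "How do I add a package in Julia?")\n\n# Or wrap it explicitly in an AITemplate\nmsg = aigenerate(AITemplate(:JuliaExpertAsk); ask = "How do I add a package in Julia?")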

source


# PromptingTools.aitemplatesMethod.

Find the top-limit templates whose name or description fields partially match the query_key::String in TEMPLATE_METADATA.

source


# PromptingTools.aitemplatesMethod.

Find the top-limit templates where the provided query_key::Regex matches any of the name, description, or the System/User message previews in TEMPLATE_METADATA.
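
For instance, a small sketch of a case-insensitive search:

julia
tmps = aitemplates(r"extract"i)\n# searches the name, description and the System/User message previews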

source


# PromptingTools.aitemplatesMethod.

Find the top-limit templates whose name::Symbol exactly matches the query_name::Symbol in TEMPLATE_METADATA.

source


# PromptingTools.aitoolsMethod.
julia
aitools(prompt_schema::AbstractAnthropicSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tools::Union{Type, Function, Method, AbstractTool, Vector} = Tool[],\n    verbose::Bool = true,\n    api_key::String = ANTHROPIC_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    cache::Union{Nothing, Symbol} = nothing,\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = (;\n        tool_choice = nothing),\n    kwargs...)

Calls chat completion API with an optional tool call signature. It can receive both tools and standard string-based content. Ideal for agentic workflows with more complex cognitive architectures.

Difference to aigenerate: Response can be a tool call (structured)

Differences to aiextract: Can provide infinitely many tools (including Functions!) and then respond with the tool call's output.

Arguments

Example

julia
## Let's define a tool\nget_weather(location, date) = "The weather in $location on $date is 70 degrees."\n\nmsg = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather, model = "claudeh")\nPT.execute_tool(get_weather, msg.tool_calls[1].args)\n# "The weather in Tokyo on 2023-05-03 is 70 degrees."\n\n# Ignores the tool\nmsg = aitools("What's your name?";\n    tools = get_weather, model = "claudeh")\n# I don't have a personal name, but you can call me your AI assistant!

How to have a multi-turn conversation with tools:

julia
conv = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather, return_all = true, model = "claudeh")\n\ntool_msg = conv[end].tool_calls[1] # there can be multiple tool calls requested!!\n\n# Execute the output to the tool message content\ntool_msg.content = PT.execute_tool(get_weather, tool_msg.args)\n\n# Add the tool message to the conversation\npush!(conv, tool_msg)\n\n# Call LLM again with the updated conversation\nconv = aitools(\n    "And in New York?"; tools = get_weather, return_all = true, conversation = conv, model = "claudeh")\n# 6-element Vector{AbstractMessage}:\n# SystemMessage("Act as a helpful AI assistant")\n# UserMessage("What's the weather in Tokyo on May 3rd, 2023?")\n# AIToolRequest("-"; Tool Requests: 1)\n# ToolMessage("The weather in Tokyo on 2023-05-03 is 70 degrees.")\n# UserMessage("And in New York?")\n# AIToolRequest("-"; Tool Requests: 1)

source


# PromptingTools.aitoolsMethod.
julia
aitools(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;\n    tools::Union{Type, Function, Method, AbstractTool, Vector} = Tool[],\n    verbose::Bool = true,\n    api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = (;\n        tool_choice = nothing),\n    strict::Union{Nothing, Bool} = nothing,\n    json_mode::Union{Nothing, Bool} = nothing,\n    name_user::Union{Nothing, String} = nothing,\n    name_assistant::Union{Nothing, String} = nothing,\n    kwargs...)

Calls chat completion API with an optional tool call signature. It can receive both tools and standard string-based content. Ideal for agentic workflows with more complex cognitive architectures.

Difference to aigenerate: Response can be a tool call (structured)

Differences to aiextract: Can provide infinitely many tools (including Functions!) and then respond with the tool call's output.

Arguments

Example

julia
## Let's define a tool\nget_weather(location, date) = "The weather in $location on $date is 70 degrees."\n\n## JSON mode request\nmsg = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather,\n    json_mode = true)\nPT.execute_tool(get_weather, msg.tool_calls[1].args)\n# "The weather in Tokyo on 2023-05-03 is 70 degrees."\n\n# Function calling request\nmsg = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather)\nPT.execute_tool(get_weather, msg.tool_calls[1].args)\n# "The weather in Tokyo on 2023-05-03 is 70 degrees."\n\n# Ignores the tool\nmsg = aitools("What's your name?";\n    tools = get_weather)\n# I don't have a personal name, but you can call me your AI assistant!

How to have a multi-turn conversation with tools:

julia
conv = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather, return_all = true)\n\ntool_msg = conv[end].tool_calls[1] # there can be multiple tool calls requested!!\n\n# Execute the output to the tool message content\ntool_msg.content = PT.execute_tool(get_weather, tool_msg.args)\n\n# Add the tool message to the conversation\npush!(conv, tool_msg)\n\n# Call LLM again with the updated conversation\nconv = aitools(\n    "And in New York?"; tools = get_weather, return_all = true, conversation = conv)\n# 6-element Vector{AbstractMessage}:\n# SystemMessage("Act as a helpful AI assistant")\n# UserMessage("What's the weather in Tokyo on May 3rd, 2023?")\n# AIToolRequest("-"; Tool Requests: 1)\n# ToolMessage("The weather in Tokyo on 2023-05-03 is 70 degrees.")\n# UserMessage("And in New York?")\n# AIToolRequest("-"; Tool Requests: 1)

source


# PromptingTools.aitoolsMethod.
julia
aitools(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aitools call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

source


# PromptingTools.align_tracer!Method.

Aligns multiple tracers in the vector to have the same Parent and Thread IDs as the first item.
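
A minimal sketch of the intended usage (assuming conv is a vector of tracer messages obtained with return_all = true):

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())\nconv = aigenerate(wrap_schema, "Say hi!"; model = "gpt4t", return_all = true)\n\n# Align parent_id / thread_id of all tracer messages to the first item\nPT.align_tracer!(conv)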

source


# PromptingTools.align_tracer!Method.

Aligns the tracer message, updating the parent_id, thread_id. Often used to align multiple tracers in the vector to have the same IDs.

source


# PromptingTools.anthropic_apiFunction.
julia
anthropic_api(\n    prompt_schema::AbstractAnthropicSchema,\n    messages::Vector{<:AbstractDict{String, <:Any}} = Vector{Dict{String, Any}}();\n    api_key::AbstractString = ANTHROPIC_API_KEY,\n    system::Union{Nothing, AbstractString, AbstractVector{<:AbstractDict}} = nothing,\n    endpoint::String = "messages",\n    max_tokens::Int = 2048,\n    model::String = "claude-3-haiku-20240307", http_kwargs::NamedTuple = NamedTuple(),\n    stream::Bool = false,\n    url::String = "https://api.anthropic.com/v1",\n    cache::Union{Nothing, Symbol} = nothing,\n    kwargs...)

Simple wrapper for a call to Anthropic API.

Keyword Arguments

source


# PromptingTools.anthropic_extra_headersMethod.
julia
anthropic_extra_headers

Adds API version and beta headers to the request.

Kwargs / Beta headers

source


# PromptingTools.auth_headerMethod.
julia
auth_header(api_key::Union{Nothing, AbstractString};\n    bearer::Bool = true,\n    x_api_key::Bool = false,\n    extra_headers::AbstractVector = Vector{\n        Pair{String, String},\n    }[],\n    kwargs...)

Creates the authentication headers for any API request. Assumes that the communication is done in JSON format.
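
A minimal sketch of the expected usage (the exact header pairs returned are an assumption here):

julia
headers = PT.auth_header("<your-api-key>")\n# typically includes "Authorization" => "Bearer <your-api-key>" plus JSON Content-Type/Accept headers\n\n# For providers that expect an x-api-key header instead of a Bearer token:\nheaders = PT.auth_header("<your-api-key>"; bearer = false, x_api_key = true)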

Arguments

source


# PromptingTools.build_response_bodyMethod.
julia
build_response_body(\n    flavor::AnthropicStream, cb::AbstractStreamCallback; verbose::Bool = false, kwargs...)

Build the response body from the chunks to mimic receiving a standard response from the API.

Note: Limited functionality for now. Does NOT support tool use. Use standard responses for these.

source


# PromptingTools.build_response_bodyMethod.
julia
build_response_body(flavor::OpenAIStream, cb::AbstractStreamCallback; verbose::Bool = false, kwargs...)

Build the response body from the chunks to mimic receiving a standard response from the API.

Note: Limited functionality for now. Does NOT support tool use, refusals, logprobs. Use standard responses for these.

source


# PromptingTools.build_template_metadataFunction.
julia
build_template_metadata(\n    template::AbstractVector{<:AbstractMessage}, template_name::Symbol,\n    metadata_msgs::AbstractVector{<:MetadataMessage} = MetadataMessage[]; max_length::Int = 100)

Builds AITemplateMetadata for a given template based on the messages in template and other information.

AITemplateMetadata is a helper struct for easy searching and reviewing of templates via aitemplates().

Note: Assumes that there is only ever one UserMessage and SystemMessage (concatenates them together)
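
A hedged sketch of building the metadata for a loaded template (obtaining the message vector via render on an AITemplate is an assumption here):

julia
const PT = PromptingTools\n\ntemplate = PT.render(PT.AITemplate(:JuliaExpertAsk)) # vector of messages with placeholders intact\nmeta = PT.build_template_metadata(template, :JuliaExpertAsk)\nmeta isa PT.AITemplateMetadata # true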

source


# PromptingTools.call_costMethod.
julia
call_cost(prompt_tokens::Int, completion_tokens::Int, model::String;\n    cost_of_token_prompt::Number = get(MODEL_REGISTRY,\n        model,\n        (; cost_of_token_prompt = 0.0)).cost_of_token_prompt,\n    cost_of_token_generation::Number = get(MODEL_REGISTRY, model,\n        (; cost_of_token_generation = 0.0)).cost_of_token_generation)\n\ncall_cost(msg, model::String)

Calculate the cost of a call based on the number of tokens in the message and the cost per token.

Arguments

Returns

Examples

julia
# Assuming MODEL_REGISTRY is set up with appropriate costs\nMODEL_REGISTRY = Dict(\n    "model1" => (cost_of_token_prompt = 0.05, cost_of_token_generation = 0.10),\n    "model2" => (cost_of_token_prompt = 0.07, cost_of_token_generation = 0.02)\n)\n\ncost1 = call_cost(10, 20, "model1")\n\n# from message\nmsg1 = AIMessage(;tokens=[10, 20])  # 10 prompt tokens, 20 generation tokens\ncost1 = call_cost(msg1, "model1")\n# cost1 = 10 * 0.05 + 20 * 0.10 = 2.5\n\n# Using custom token costs\ncost2 = call_cost(10, 20, "model3"; cost_of_token_prompt = 0.08, cost_of_token_generation = 0.12)\n# cost2 = 10 * 0.08 + 20 * 0.12 = 3.2

source


# PromptingTools.call_cost_alternativeMethod.

call_cost_alternative()

Alternative cost calculation. Used to calculate cost of image generation with DALL-E 3 and similar.

source


# PromptingTools.callbackMethod.
julia
callback(cb::AbstractStreamCallback, chunk::StreamChunk; kwargs...)

Process the chunk to be printed and print it. It's a wrapper for two operations:

source


# PromptingTools.configure_callback!Method.
julia
configure_callback!(cb::StreamCallback, schema::AbstractPromptSchema;\n    api_kwargs...)

Configures the callback cb for streaming with a given prompt schema.

If no cb.flavor is provided, adjusts the flavor and the provided api_kwargs as necessary. Eg, for most schemas, we add kwargs like stream = true to the api_kwargs.

If cb.flavor is provided, both callback and api_kwargs are left unchanged! You need to configure them yourself!
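
A minimal sketch, assuming the function returns the configured callback together with the augmented api_kwargs to pass on to the ai* call:

julia
cb = PT.StreamCallback() # no flavor set yet\n\n# Assumption: returns the configured callback and api_kwargs (eg, with stream = true added)\ncb, api_kwargs = PT.configure_callback!(cb, PT.OpenAISchema())\napi_kwargs # (; stream = true, ...)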

source


', 73)), + _cache[49] || (_cache[49] = createStaticVNode('
# PromptingTools.aiembedFunction.
julia
aiembed(tracer_schema::AbstractTracerSchema,\n    doc_or_docs::Union{AbstractString, AbstractVector{<:AbstractString}}, postprocess::Function = identity;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiembed call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

source


# PromptingTools.aiembedMethod.
julia
aiembed(prompt_schema::AbstractOllamaManagedSchema,\n        doc_or_docs::Union{AbstractString, AbstractVector{<:AbstractString}},\n        postprocess::F = identity;\n        verbose::Bool = true,\n        api_key::String = "",\n        model::String = MODEL_EMBEDDING,\n        http_kwargs::NamedTuple = (retry_non_idempotent = true,\n                                   retries = 5,\n                                   readtimeout = 120),\n        api_kwargs::NamedTuple = NamedTuple(),\n        kwargs...) where {F <: Function}

The aiembed function generates embeddings for the given input using a specified model and returns a message object containing the embeddings, status, token count, and elapsed time.

Arguments

Returns

Note: Ollama API currently does not return the token count, so it's set to (0,0)

Example

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\n\nmsg = aiembed(schema, "Hello World"; model="openhermes2.5-mistral")\nmsg.content # 4096-element JSON3.Array{Float64...

We can embed multiple strings at once and they will be horizontally concatenated (hcat) into a matrix (ie, each column corresponds to one string)

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\n\nmsg = aiembed(schema, ["Hello World", "How are you?"]; model="openhermes2.5-mistral")\nmsg.content # 4096×2 Matrix{Float64}:

If you plan to calculate the cosine distance between embeddings, you can normalize them first:

julia
const PT = PromptingTools\nusing LinearAlgebra\nschema = PT.OllamaManagedSchema()\n\nmsg = aiembed(schema, ["embed me", "and me too"], LinearAlgebra.normalize; model="openhermes2.5-mistral")\n\n# calculate cosine distance between the two normalized embeddings as a simple dot product\nmsg.content' * msg.content[:, 1] # [1.0, 0.34]

Similarly, you can use the postprocess argument to materialize the data from JSON3.Object by using postprocess = copy

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\n\nmsg = aiembed(schema, "Hello World", copy; model="openhermes2.5-mistral")\nmsg.content # 4096-element Vector{Float64}

source


# PromptingTools.aiembedMethod.
julia
aiembed(prompt_schema::AbstractOpenAISchema,\n        doc_or_docs::Union{AbstractString, AbstractVector{<:AbstractString}},\n        postprocess::F = identity;\n        verbose::Bool = true,\n        api_key::String = OPENAI_API_KEY,\n        model::String = MODEL_EMBEDDING, \n        http_kwargs::NamedTuple = (retry_non_idempotent = true,\n                                   retries = 5,\n                                   readtimeout = 120),\n        api_kwargs::NamedTuple = NamedTuple(),\n        kwargs...) where {F <: Function}

The aiembed function generates embeddings for the given input using a specified model and returns a message object containing the embeddings, status, token count, and elapsed time.

Arguments

Returns

Example

julia
msg = aiembed("Hello World")\nmsg.content # 1536-element JSON3.Array{Float64...

We can embed multiple strings at once and they will be horizontally concatenated (hcat) into a matrix (ie, each column corresponds to one string)

julia
msg = aiembed(["Hello World", "How are you?"])\nmsg.content # 1536×2 Matrix{Float64}:

If you plan to calculate the cosine distance between embeddings, you can normalize them first:

julia
using LinearAlgebra\nmsg = aiembed(["embed me", "and me too"], LinearAlgebra.normalize)\n\n# calculate cosine distance between the two normalized embeddings as a simple dot product\nmsg.content' * msg.content[:, 1] # [1.0, 0.787]

source


# PromptingTools.aiextractMethod.
julia
aiextract(prompt_schema::AbstractAnthropicSchema, prompt::ALLOWED_PROMPT_TYPE;\n    return_type::Union{Type, AbstractTool, Vector},\n    verbose::Bool = true,\n    api_key::String = ANTHROPIC_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = NamedTuple(),\n    cache::Union{Nothing, Symbol} = nothing,\n    kwargs...)

Extract required information (defined by a struct return_type) from the provided prompt by leveraging Anthropic's function calling mode.

This is a perfect solution for extracting structured information from text (eg, extract organization names in news articles, etc.).

Read the best practices here.

It's effectively a light wrapper around aigenerate call, which requires additional keyword argument return_type to be provided and will enforce the model outputs to adhere to it.

Arguments

Note: At the moment, the cache is only allowed for prompt segments over 1024 tokens (in some cases, over 2048 tokens). You'll get an error if you try to cache short prompts.

Returns

If return_all=false (default):

If return_all=true:

See also: tool_call_signature, MaybeExtract, ItemsExtract, aigenerate

Example

Do you want to extract some specific measurements from a text like age, weight and height? You need to define the information you need as a struct (return_type):

"Person's age, height, and weight."\nstruct MyMeasurement\n    age::Int # required\n    height::Union{Int,Nothing} # optional\n    weight::Union{Nothing,Float64} # optional\nend\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall."; model="claudeh", return_type=MyMeasurement)\n# PromptingTools.DataMessage(MyMeasurement)\nmsg.content\n# MyMeasurement(30, 180, 80.0)

The fields that allow Nothing are marked as optional in the schema:

msg = aiextract("James is 30."; model="claudeh", return_type=MyMeasurement)\n# MyMeasurement(30, nothing, nothing)

If there are multiple items you want to extract, define a wrapper struct to get a Vector of MyMeasurement:

struct ManyMeasurements\n    measurements::Vector{MyMeasurement}\nend\n\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall. Then Jack is 19 but really tall - over 190!"; model="claudeh", return_type=ManyMeasurements)\n\nmsg.content.measurements\n# 2-element Vector{MyMeasurement}:\n#  MyMeasurement(30, 180, 80.0)\n#  MyMeasurement(19, 190, nothing)

Or you can use the convenience wrapper ItemsExtract to extract multiple measurements (zero, one or more):

julia
using PromptingTools: ItemsExtract\n\nreturn_type = ItemsExtract{MyMeasurement}\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall. Then Jack is 19 but really tall - over 190!"; model="claudeh", return_type)\n\nmsg.content.items # see the extracted items

Or if you want your extraction to fail gracefully when data isn't found, use MaybeExtract{T} wrapper (this trick is inspired by the Instructor package!):

using PromptingTools: MaybeExtract\n\nreturn_type = MaybeExtract{MyMeasurement}\n# Effectively the same as:\n# struct MaybeExtract{T}\n#     result::Union{T, Nothing} // The result of the extraction\n#     error::Bool // true if no result was found (the extraction failed), false otherwise\n#     message::Union{Nothing, String} // Only present if no result is found, should be short and concise\n# end\n\n# If LLM extraction fails, it will return a Dict with `error` and `message` fields instead of the result!\nmsg = aiextract("Extract measurements from the text: I am giraffe"; model="claudeo", return_type)\nmsg.content\n# Output: MaybeExtract{MyMeasurement}(nothing, true, "I'm sorry, but your input of "I am giraffe" does not contain any information about a person's age, height or weight measurements that I can extract. To use this tool, please provide a statement that includes at least the person's age, and optionally their height in inches and weight in pounds. Without that information, I am unable to extract the requested measurements.")

That way, you can handle the error gracefully and get a reason why extraction failed (in msg.content.message).

However, this can fail with weaker models like claudeh, so we can apply one of our prompt templates with an embedded reasoning step:

julia
msg = aiextract(:ExtractDataCoTXML; data="I am giraffe", model="claudeh", return_type)\nmsg.content\n# Output: MaybeExtract{MyMeasurement}(nothing, true, "The provided data does not contain the expected information about a person's age, height, and weight.")

Note that when using a prompt template, we provide data for the extraction as the corresponding placeholder (see aitemplates("extract") for documentation of this template).

Note that the error message refers to a giraffe not being a human, because in our MyMeasurement docstring, we said that it's for people!

Example of using a vector of field names with aiextract

julia
fields = [:location, :temperature => Float64, :condition => String]\nmsg = aiextract("Extract the following information from the text: location, temperature, condition. Text: The weather in New York is sunny and 72.5 degrees Fahrenheit."; \nreturn_type = fields, model="claudeh")

Or simply call aiextract("some text"; return_type = [:reasoning,:answer], model="claudeh") to get Chain-of-Thought reasoning for the extraction task.

It will be returned as a new generated type, which you can check with PromptingTools.isextracted(msg.content) == true to confirm the data has been extracted correctly.

This new syntax also allows you to provide field-level descriptions, which will be passed to the model.

julia
fields_with_descriptions = [\n    :location,\n    :temperature => Float64,\n    :temperature__description => "Temperature in degrees Fahrenheit",\n    :condition => String,\n    :condition__description => "Current weather condition (e.g., sunny, rainy, cloudy)"\n]\nmsg = aiextract("The weather in New York is sunny and 72.5 degrees Fahrenheit."; return_type = fields_with_descriptions, model="claudeh")

source


# PromptingTools.aiextractMethod.
julia
aiextract(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;\n    return_type::Union{Type, AbstractTool, Vector},\n    verbose::Bool = true,\n    api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = (;\n        tool_choice = nothing),\n    strict::Union{Nothing, Bool} = nothing,\n    kwargs...)

Extract required information (defined by a struct return_type) from the provided prompt by leveraging OpenAI function calling mode.

This is a perfect solution for extracting structured information from text (eg, extract organization names in news articles, etc.)

It's effectively a light wrapper around aigenerate call, which requires additional keyword argument return_type to be provided and will enforce the model outputs to adhere to it.

Arguments

Returns

If return_all=false (default):

If return_all=true:

Note: msg.content can be a single object (if a single tool is used) or a vector of objects (if multiple tools are used)!

See also: tool_call_signature, MaybeExtract, ItemsExtract, aigenerate, generate_struct

Example

Do you want to extract some specific measurements from a text like age, weight and height? You need to define the information you need as a struct (return_type):

"Person's age, height, and weight."\nstruct MyMeasurement\n    age::Int # required\n    height::Union{Int,Nothing} # optional\n    weight::Union{Nothing,Float64} # optional\nend\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall."; return_type=MyMeasurement)\n# PromptingTools.DataMessage(MyMeasurement)\nmsg.content\n# MyMeasurement(30, 180, 80.0)

The fields that allow Nothing are marked as optional in the schema:

msg = aiextract("James is 30."; return_type=MyMeasurement)\n# MyMeasurement(30, nothing, nothing)

If there are multiple items you want to extract, define a wrapper struct to get a Vector of MyMeasurement:

struct ManyMeasurements\n    measurements::Vector{MyMeasurement}\nend\n\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall. Then Jack is 19 but really tall - over 190!"; return_type=ManyMeasurements)\n\nmsg.content.measurements\n# 2-element Vector{MyMeasurement}:\n#  MyMeasurement(30, 180, 80.0)\n#  MyMeasurement(19, 190, nothing)

Or you can use the convenience wrapper ItemsExtract to extract multiple measurements (zero, one or more):

julia
using PromptingTools: ItemsExtract\n\nreturn_type = ItemsExtract{MyMeasurement}\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall. Then Jack is 19 but really tall - over 190!"; return_type)\n\nmsg.content.items # see the extracted items

Or if you want your extraction to fail gracefully when data isn't found, use MaybeExtract{T} wrapper (this trick is inspired by the Instructor package!):

using PromptingTools: MaybeExtract\n\nreturn_type = MaybeExtract{MyMeasurement}\n# Effectively the same as:\n# struct MaybeExtract{T}\n#     result::Union{T, Nothing} // The result of the extraction\n#     error::Bool // true if no result was found (the extraction failed), false otherwise\n#     message::Union{Nothing, String} // Only present if no result is found, should be short and concise\n# end\n\n# If LLM extraction fails, it will return a Dict with `error` and `message` fields instead of the result!\nmsg = aiextract("Extract measurements from the text: I am giraffe"; return_type)\nmsg.content\n# MaybeExtract{MyMeasurement}(nothing, true, "I'm sorry, but I can only assist with human measurements.")

That way, you can handle the error gracefully and get a reason why extraction failed (in msg.content.message).

Note that the error message refers to a giraffe not being a human, because in our MyMeasurement docstring, we said that it's for people!

Some non-OpenAI providers require a different specification of the "tool choice" than OpenAI. For example, to use Mistral models ("mistrall" for mistral large), do:

julia
"Some fruit"\nstruct Fruit\n    name::String\nend\naiextract("I ate an apple",return_type=Fruit,api_kwargs=(;tool_choice="any"),model="mistrall")\n# Notice two differences: 1) struct MUST have a docstring, 2) tool_choice is set explicitly set to "any"

Example of using a vector of field names with aiextract

julia
fields = [:location, :temperature => Float64, :condition => String]\nmsg = aiextract("Extract the following information from the text: location, temperature, condition. Text: The weather in New York is sunny and 72.5 degrees Fahrenheit."; return_type = fields)

Or simply call aiextract("some text"; return_type = [:reasoning,:answer]) to get Chain-of-Thought reasoning for the extraction task.

It will be returned as a new generated type, which you can check with PromptingTools.isextracted(msg.content) == true to confirm the data has been extracted correctly.

This new syntax also allows you to provide field-level descriptions, which will be passed to the model.

julia
fields_with_descriptions = [\n    :location,\n    :temperature => Float64,\n    :temperature__description => "Temperature in degrees Fahrenheit",\n    :condition => String,\n    :condition__description => "Current weather condition (e.g., sunny, rainy, cloudy)"\n]\nmsg = aiextract("The weather in New York is sunny and 72.5 degrees Fahrenheit."; return_type = fields_with_descriptions)

If you feel that the extraction is not smart/creative enough, you can use json_mode = true to enforce the JSON mode, which automatically enables the structured output mode (as opposed to function calling mode).

The JSON mode is useful for cases when you want to enforce a specific output format, such as JSON, and want the model to adhere to that format, but don't want to pretend it's a "function call". Expect a few seconds' delay on the first call for a specific struct, as the provider has to produce the constrained grammar first.

julia
msg = aiextract("Extract the following information from the text: location, temperature, condition. Text: The weather in New York is sunny and 72.5 degrees Fahrenheit."; \nreturn_type = fields_with_descriptions, json_mode = true)\n# PromptingTools.DataMessage(NamedTuple)\n\nmsg.content\n# (location = "New York", temperature = 72.5, condition = "sunny")

It works equally well for structs provided as return types:

julia
msg = aiextract("James is 30, weighs 80kg. He's 180cm tall."; return_type=MyMeasurement, json_mode=true)

source


# PromptingTools.aiextractMethod.
julia
aiextract(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiextract call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractAnthropicSchema, prompt::ALLOWED_PROMPT_TYPE; verbose::Bool = true,\n    api_key::String = ANTHROPIC_API_KEY, model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    streamcallback::Any = nothing,\n    no_system_message::Bool = false,\n    aiprefill::Union{Nothing, AbstractString} = nothing,\n    http_kwargs::NamedTuple = NamedTuple(), api_kwargs::NamedTuple = NamedTuple(),\n    cache::Union{Nothing, Symbol} = nothing,\n    kwargs...)

Generate an AI response based on a given prompt using the Anthropic API.

Arguments

Note: At the moment, the cache is only allowed for prompt segments over 1024 tokens (in some cases, over 2048 tokens). You'll get an error if you try to cache short prompts.

Returns

Use msg.content to access the extracted string.

See also: ai_str, aai_str

Example

Simple hello world to test the API:

julia
const PT = PromptingTools\nschema = PT.AnthropicSchema() # We need to be explicit if we want Anthropic, otherwise OpenAISchema is the default\n\nmsg = aigenerate(schema, "Say hi!"; model="claudeh") #claudeh is the model alias for Claude 3 Haiku, fast and cheap model\n[ Info: Tokens: 21 @ Cost: $0.0 in 0.6 seconds\nAIMessage("Hello!")

msg is an AIMessage object. Access the generated string via content property:

julia
typeof(msg) # AIMessage{SubString{String}}\npropertynames(msg) # (:content, :status, :tokens, :elapsed, :cost, :log_prob, :finish_reason, :run_id, :sample_id, :_type)\nmsg.content # "Hello!

Note: We need to be explicit about the schema we want to use. If we don't, it will default to OpenAISchema (=PT.DEFAULT_SCHEMA). Alternatively, if you provide a known model name or alias (eg, claudeh for Claude 3 Haiku - see MODEL_REGISTRY), the schema will be inferred from the model name.

We will use the Claude 3 Haiku model for the following examples, so there is no need to specify the schema. See also "claudeo" and "claudes" for other Claude 3 models.

You can use string interpolation:

julia
const PT = PromptingTools\n\na = 1\nmsg=aigenerate("What is `$a+$a`?"; model="claudeh")\nmsg.content # "The answer to `1+1` is `2`."

___ You can provide the whole conversation or more intricate prompts as a Vector{AbstractMessage}. Claude models are good at completing conversations that ended with an AIMessage (they just continue where it left off):

julia
const PT = PromptingTools\n\nconversation = [\n    PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),\n    PT.UserMessage("I have feelings for my iPhone. What should I do?"),\n    PT.AIMessage("Hmm, strong the attachment is,")]\n\nmsg = aigenerate(conversation; model="claudeh")\nAIMessage("I sense. But unhealthy it may be. Your iPhone, a tool it is, not a living being. Feelings of affection, understandable they are, <continues>")

Example of streaming:

julia
# Simplest usage, just provide where to stream the text\nmsg = aigenerate("Count from 1 to 100."; streamcallback = stdout, model="claudeh")\n\nstreamcallback = PT.StreamCallback()\nmsg = aigenerate("Count from 1 to 100."; streamcallback, model="claudeh")\n# this allows you to inspect each chunk with `streamcallback.chunks`. You can then empty it with `empty!(streamcallback)` in between repeated calls.\n\n# Get verbose output with details of each chunk\nstreamcallback = PT.StreamCallback(; verbose=true, throw_on_error=true)\nmsg = aigenerate("Count from 1 to 10."; streamcallback, model="claudeh")

Note: Streaming support is only for Anthropic models and it doesn't yet support tool calling and a few other features (logprobs, refusals, etc.)

You can also provide a prefill for the AI response to steer the response in a certain direction (eg, formatting, style):

julia
msg = aigenerate("Sum up 1 to 100."; aiprefill = "I'd be happy to answer in one number without any additional text. The answer is:", model="claudeh")

Note: It MUST NOT end with a trailing space. You'll get an API error if you do.
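
Relatedly, a hedged sketch of enabling Anthropic prompt caching via the cache keyword (the accepted symbol values, eg :system, are an assumption here; the cache is only applied to sufficiently long prompt segments, see the note above):

julia
const PT = PromptingTools\n\n# Assumption: cache = :system caches the (long) system prompt across repeated calls\nconversation = [\n    PT.SystemMessage("<imagine more than 1024 tokens of detailed instructions here>"),\n    PT.UserMessage("Say hi!")]\n\nmsg = aigenerate(conversation; model = "claudeh", cache = :system)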

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractGoogleSchema, prompt::ALLOWED_PROMPT_TYPE;\n    verbose::Bool = true,\n    api_key::String = GOOGLE_API_KEY,\n    model::String = "gemini-pro", return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Generate an AI response based on a given prompt using the Google Gemini API. Get the API key here.

Note:

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aiembed, aiclassify, aiextract, aiscan, aitemplates

Example

Simple hello world to test the API:

julia
result = aigenerate("Say Hi!"; model="gemini-pro")\n# AIMessage("Hi there! 👋 I'm here to help you with any questions or tasks you may have. Just let me know what you need, and I'll do my best to assist you.")

result is an AIMessage object. Access the generated string via content property:

julia
typeof(result) # AIMessage{SubString{String}}\npropertynames(result) # (:content, :status, :tokens, :elapsed\nresult.content # "Hi there! ...

___ You can use string interpolation and alias "gemini":

julia
a = 1\nmsg=aigenerate("What is `$a+$a`?"; model="gemini")\nmsg.content # "1+1 is 2."

___ You can provide the whole conversation or more intricate prompts as a Vector{AbstractMessage}:

julia
const PT = PromptingTools\n\nconversation = [\n    PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),\n    PT.UserMessage("I have feelings for my iPhone. What should I do?")]\nmsg=aigenerate(conversation; model="gemini")\n# AIMessage("Young Padawan, you have stumbled into a dangerous path.... <continues>")

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractOllamaManagedSchema, prompt::ALLOWED_PROMPT_TYPE; verbose::Bool = true,\n    api_key::String = "", model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = NamedTuple(), api_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Generate an AI response based on a given prompt using the Ollama API.

Arguments

Returns

Use msg.content to access the extracted string.

See also: ai_str, aai_str, aiembed

Example

Simple hello world to test the API:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema() # We need to be explicit if we want Ollama, OpenAISchema is the default\n\nmsg = aigenerate(schema, "Say hi!"; model="openhermes2.5-mistral")\n# [ Info: Tokens: 69 in 0.9 seconds\n# AIMessage("Hello! How can I assist you today?")

msg is an AIMessage object. Access the generated string via content property:

julia
typeof(msg) # AIMessage{SubString{String}}\npropertynames(msg) # (:content, :status, :tokens, :elapsed\nmsg.content # "Hello! How can I assist you today?"

Note: We need to be explicit about the schema we want to use. If we don't, it will default to OpenAISchema (=PT.DEFAULT_SCHEMA). ___ You can use string interpolation:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\na = 1\nmsg=aigenerate(schema, "What is `$a+$a`?"; model="openhermes2.5-mistral")\nmsg.content # "The result of `1+1` is `2`."

___ You can provide the whole conversation or more intricate prompts as a Vector{AbstractMessage}:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\n\nconversation = [\n    PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),\n    PT.UserMessage("I have feelings for my iPhone. What should I do?")]\n\nmsg = aigenerate(schema, conversation; model="openhermes2.5-mistral")\n# [ Info: Tokens: 111 in 2.1 seconds\n# AIMessage("Strong the attachment is, it leads to suffering it may. Focus on the force within you must, ...<continues>")

Note: Managed Ollama currently supports at most 1 User Message and 1 System Message given the API limitations. If you want more, you need to use the ChatMLSchema.

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractOllamaManagedSchema, prompt::ALLOWED_PROMPT_TYPE; verbose::Bool = true,\n    api_key::String = "", model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = NamedTuple(), api_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Generate an AI response based on a given prompt using the Ollama API.

Arguments

Returns

Use msg.content to access the extracted string.

See also: ai_str, aai_str, aiembed

Example

Simple hello world to test the API:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema() # We need to be explicit if we want Ollama, OpenAISchema is the default\n\nmsg = aigenerate(schema, "Say hi!"; model="openhermes2.5-mistral")\n# [ Info: Tokens: 69 in 0.9 seconds\n# AIMessage("Hello! How can I assist you today?")

msg is an AIMessage object. Access the generated string via content property:

julia
typeof(msg) # AIMessage{SubString{String}}\npropertynames(msg) # (:content, :status, :tokens, :elapsed\nmsg.content # "Hello! How can I assist you today?"

Note: We need to be explicit about the schema we want to use. If we don't, it will default to OpenAISchema (=PT.DEFAULT_SCHEMA). ___ You can use string interpolation:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\na = 1\nmsg=aigenerate(schema, "What is `$a+$a`?"; model="openhermes2.5-mistral")\nmsg.content # "The result of `1+1` is `2`."

___ You can provide the whole conversation or more intricate prompts as a Vector{AbstractMessage}:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\n\nconversation = [\n    PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),\n    PT.UserMessage("I have feelings for my iPhone. What should I do?")]\n\nmsg = aigenerate(schema, conversation; model="openhermes2.5-mistral")\n# [ Info: Tokens: 111 in 2.1 seconds\n# AIMessage("Strong the attachment is, it leads to suffering it may. Focus on the force within you must, ...<continues>")

Note: Managed Ollama currently supports at most 1 User Message and 1 System Message given the API limitations. If you want more, you need to use the ChatMLSchema.

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;\n    verbose::Bool = true,\n    api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT, return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    streamcallback::Any = nothing,\n    no_system_message::Bool = false,\n    name_user::Union{Nothing, String} = nothing,\n    name_assistant::Union{Nothing, String} = nothing,\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Generate an AI response based on a given prompt using the OpenAI API.

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aiembed, aiclassify, aiextract, aiscan, aitemplates

Example

Simple hello world to test the API:

julia
result = aigenerate("Say Hi!")\n# [ Info: Tokens: 29 @ Cost: $0.0 in 1.0 seconds\n# AIMessage("Hello! How can I assist you today?")

result is an AIMessage object. Access the generated string via content property:

julia
typeof(result) # AIMessage{SubString{String}}\npropertynames(result) # (:content, :status, :tokens, :elapsed\nresult.content # "Hello! How can I assist you today?"

___ You can use string interpolation:

julia
a = 1\nmsg=aigenerate("What is `$a+$a`?")\nmsg.content # "The sum of `1+1` is `2`."

___ You can provide the whole conversation or more intricate prompts as a Vector{AbstractMessage}:

julia
const PT = PromptingTools\n\nconversation = [\n    PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),\n    PT.UserMessage("I have feelings for my iPhone. What should I do?")]\nmsg=aigenerate(conversation)\n# AIMessage("Ah, strong feelings you have for your iPhone. A Jedi's path, this is not... <continues>")

Example of streaming:

julia
# Simplest usage, just provide where to stream the text\nmsg = aigenerate("Count from 1 to 100."; streamcallback = stdout)\n\nstreamcallback = PT.StreamCallback()\nmsg = aigenerate("Count from 1 to 100."; streamcallback)\n# this allows you to inspect each chunk with `streamcallback.chunks`. You can then empty it with `empty!(streamcallback)` in between repeated calls.\n\n# Get verbose output with details of each chunk\nstreamcallback = PT.StreamCallback(; verbose=true, throw_on_error=true)\nmsg = aigenerate("Count from 1 to 10."; streamcallback)

Learn more in ?StreamCallback. Note: Streaming support is only for OpenAI models and it doesn't yet support tool calling and a few other features (logprobs, refusals, etc.)

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", return_all::Bool = false, kwargs...)

Wraps the normal aigenerate call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

Example

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())\nmsg = aigenerate(wrap_schema, "Say hi!"; model = "gpt4t")\nmsg isa TracerMessage # true\nmsg.content # access content as if it were the message\nPT.pprint(msg) # pretty-print the message

It works on a vector of messages and converts only the non-tracer ones, eg,

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())\nconv = aigenerate(wrap_schema, "Say hi!"; model = "gpt4t", return_all = true)\nall(PT.istracermessage, conv) #true

source


# PromptingTools.aiimageMethod.
julia
aiimage(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;\n    image_size::AbstractString = "1024x1024",\n    image_quality::AbstractString = "standard",\n    image_n::Integer = 1,\n    verbose::Bool = true,\n    api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_IMAGE_GENERATION,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Generates an image from the provided prompt. If multiple "messages" are provided in prompt, it extracts the text ONLY from the last message!

Image (or the reference to it) will be returned in a DataMessage.content, the format will depend on the api_kwargs.response_format you set.

Can be used for generating images of varying quality and style with dall-e-* models. This function DOES NOT SUPPORT multi-turn conversations (ie, do not provide previous conversation via conversation argument).

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aigenerate, aiembed, aiclassify, aiextract, aiscan, aitemplates

Notes

Example

Generate an image:

julia
# You can experiment with `image_size`, `image_quality` kwargs!\nmsg = aiimage("A white cat on a car")\n\n# Download the image into a file\nusing Downloads\nDownloads.download(msg.content[:url], "cat_on_car.png")\n\n# You can also see the revised prompt that DALL-E 3 used\nmsg.content[:revised_prompt]\n# Output: "Visualize a pristine white cat gracefully perched atop a shiny car. \n# The cat's fur is stark white and its eyes bright with curiosity. \n# As for the car, it could be a contemporary sedan, glossy and in a vibrant color. \n# The scene could be set under the blue sky, enhancing the contrast between the white cat, the colorful car, and the bright blue sky."

Note that you MUST download any URL-based images within 60 minutes. The links will become inactive.

If you want to download the image directly into the DataMessage, provide response_format="b64_json" in api_kwargs:

julia
msg = aiimage("A white cat on a car"; image_quality="hd", api_kwargs=(; response_format="b64_json"))\n\n# Then you need to use Base64 package to decode it and save it to a file:\nusing Base64\nwrite("cat_on_car_hd.png", base64decode(msg.content[:b64_json]));

source


# PromptingTools.aiimageMethod.
julia
aiimage(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiimage call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

source


# PromptingTools.aiscanMethod.
julia
aiscan([prompt_schema::AbstractOllamaSchema,] prompt::ALLOWED_PROMPT_TYPE; \nimage_url::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,\nimage_path::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,\nattach_to_latest::Bool = true,\nverbose::Bool = true, api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = (;\n        retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), \n    api_kwargs::NamedTuple = (; max_tokens = 2500),\n    kwargs...)

Scans the provided image (image_url or image_path) with the goal provided in the prompt.

Can be used for many multi-modal tasks, such as: OCR (transcribe text in the image), image captioning, image classification, etc.

It's effectively a light wrapper around the aigenerate call, which accepts the additional keyword arguments image_url, image_path, and image_detail. At least one image source (url or path) must be provided.

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aigenerate, aiembed, aiclassify, aiextract, aitemplates

Notes

Example

Describe the provided image:

julia
msg = aiscan("Describe the image"; image_path="julia.png", model="bakllava")\n# [ Info: Tokens: 1141 @ Cost: $0.0117 in 2.2 seconds\n# AIMessage("The image shows a logo consisting of the word "julia" written in lowercase")

You can provide multiple images at once as a vector and ask for "low" level of detail (cheaper):

julia
msg = aiscan("Describe the image"; image_path=["julia.png","python.png"] model="bakllava")

You can use this function as a nice and quick OCR (transcribe text in the image) with a template :OCRTask. Let's transcribe some SQL code from a screenshot (no more re-typing!):

julia
using Downloads\n# Screenshot of some SQL code -- we cannot use image_url directly, so we need to download it first\nimage_url = "https://www.sqlservercentral.com/wp-content/uploads/legacy/8755f69180b7ac7ee76a69ae68ec36872a116ad4/24622.png"\nimage_path = Downloads.download(image_url)\nmsg = aiscan(:OCRTask; image_path, model="bakllava", task="Transcribe the SQL code in the image.", api_kwargs=(; max_tokens=2500))\n\n# AIMessage("```sql\n# update Orders <continue>\n\n# You can add syntax highlighting of the outputs via Markdown\nusing Markdown\nmsg.content |> Markdown.parse

Local models cannot handle image URLs directly (image_url), so you need to download the image first and provide it as image_path:

julia
using Downloads\nimage_path = Downloads.download(image_url)

Notice that we set max_tokens = 2500. If your outputs seem truncated, it might be because the default maximum tokens on the server is set too low!

source


# PromptingTools.aiscanMethod.
julia
aiscan([prompt_schema::AbstractOpenAISchema,] prompt::ALLOWED_PROMPT_TYPE; \nimage_url::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,\nimage_path::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,\nimage_detail::AbstractString = "auto",\nattach_to_latest::Bool = true,\nverbose::Bool = true, api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = (;\n        retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), \n    api_kwargs::NamedTuple = (; max_tokens = 2500),\n    kwargs...)

Scans the provided image (image_url or image_path) with the goal provided in the prompt.

Can be used for many multi-modal tasks, such as: OCR (transcribe text in the image), image captioning, image classification, etc.

It's effectively a light wrapper around the aigenerate call, with the additional keyword arguments image_url, image_path, and image_detail. At least one image source (url or path) must be provided.

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aigenerate, aiembed, aiclassify, aiextract, aitemplates

Notes

Example

Describe the provided image:

julia
msg = aiscan("Describe the image"; image_path="julia.png", model="gpt4v")\n# [ Info: Tokens: 1141 @ Cost: $0.0117 in 2.2 seconds\n# AIMessage("The image shows a logo consisting of the word "julia" written in lowercase")

You can provide multiple images at once as a vector and ask for "low" level of detail (cheaper):

julia
msg = aiscan("Describe the image"; image_path=["julia.png","python.png"], image_detail="low", model="gpt4v")

You can use this function as a nice and quick OCR (transcribe text in the image) with a template :OCRTask. Let's transcribe some SQL code from a screenshot (no more re-typing!):

julia
# Screenshot of some SQL code\nimage_url = "https://www.sqlservercentral.com/wp-content/uploads/legacy/8755f69180b7ac7ee76a69ae68ec36872a116ad4/24622.png"\nmsg = aiscan(:OCRTask; image_url, model="gpt4v", task="Transcribe the SQL code in the image.", api_kwargs=(; max_tokens=2500))\n\n# [ Info: Tokens: 362 @ Cost: $0.0045 in 2.5 seconds\n# AIMessage("```sql\n# update Orders <continue>\n\n# You can add syntax highlighting of the outputs via Markdown\nusing Markdown\nmsg.content |> Markdown.parse

Notice that we enforce max_tokens = 2500. That's because OpenAI seems to default to ~300 tokens, which provides incomplete outputs. Hence, we set this value to 2500 as a default. If you still get truncated outputs, increase this value.

source


# PromptingTools.aiscanMethod.
julia
aiscan(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiscan call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).
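
For example, a minimal sketch of a traced aiscan call (assuming a local "julia.png" image and the "bakllava" model served via Ollama; the tracer_kwargs values are purely illustrative):

julia
using PromptingTools\nconst PT = PromptingTools\n\n# Wrap the schema in a tracer to capture metadata about the call\nwrap_schema = PT.TracerSchema(PT.OllamaSchema())\nmsg = aiscan(wrap_schema, "Describe the image";\n    image_path = "julia.png", model = "bakllava",\n    tracer_kwargs = (; thread_id = :image_descriptions))\n\nPT.unwrap(msg)  # access the underlying AIMessage\nPT.meta(msg)    # inspect the captured tracing metadata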

Logic:

source


# PromptingTools.aitemplatesFunction.
julia
aitemplates

Easily find the most suitable templates for your use case.

You can search by:

Keyword Arguments

Examples

Find available templates with aitemplates:

julia
tmps = aitemplates("JuliaExpertAsk")\n# Will surface one specific template\n# 1-element Vector{AITemplateMetadata}:\n# PromptingTools.AITemplateMetadata\n#   name: Symbol JuliaExpertAsk\n#   description: String "For asking questions about Julia language. Placeholders: `ask`"\n#   version: String "1"\n#   wordcount: Int64 237\n#   variables: Array{Symbol}((1,))\n#   system_preview: String "You are a world-class Julia language programmer with the knowledge of the latest syntax. Your commun"\n#   user_preview: String "# Question\n\n{{ask}}"\n#   source: String ""

The above gives you a good idea of what the template is about, what placeholders are available, and how much it would cost to use it (=wordcount).

Search for all Julia-related templates:

julia
tmps = aitemplates("Julia")\n# 2-element Vector{AITemplateMetadata}... -> more to come later!

If you are on VSCode, you can leverage nice tabular display with vscodedisplay:

julia
using DataFrames\ntmps = aitemplates("Julia") |> DataFrame |> vscodedisplay

I have my selected template, how do I use it? Just use the "name" in aigenerate or aiclassify like you see in the first example!

source


# PromptingTools.aitemplatesMethod.

Find the top-limit templates whose name or description fields partially match the query_key::String in TEMPLATE_METADATA.

source


# PromptingTools.aitemplatesMethod.

Find the top-limit templates where provided query_key::Regex matches either of name, description or previews or User or System messages in TEMPLATE_METADATA.
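
For example, a small hypothetical regex search (the pattern is illustrative):

julia
# Case-insensitive search across names, descriptions and message previews\ntmps = aitemplates(r"extract"i)\n# returns a Vector{AITemplateMetadata} with all matching templates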

source


# PromptingTools.aitemplatesMethod.

Find the top-limit templates whose name::Symbol exactly matches the query_name::Symbol in TEMPLATE_METADATA.

source


# PromptingTools.aitoolsMethod.
julia
aitools(prompt_schema::AbstractAnthropicSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tools::Union{Type, Function, Method, AbstractTool, Vector} = Tool[],\n    verbose::Bool = true,\n    api_key::String = ANTHROPIC_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    cache::Union{Nothing, Symbol} = nothing,\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = (;\n        tool_choice = nothing),\n    kwargs...)

Calls chat completion API with an optional tool call signature. It can receive both tools and standard string-based content. Ideal for agentic workflows with more complex cognitive architectures.

Difference to aigenerate: Response can be a tool call (structured)

Differences to aiextract: Can provide infinitely many tools (including Functions!) and then respond with the tool call's output.

Arguments

Example

julia
## Let's define a tool\nget_weather(location, date) = "The weather in $location on $date is 70 degrees."\n\nmsg = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather, model = "claudeh")\nPT.execute_tool(get_weather, msg.tool_calls[1].args)\n# "The weather in Tokyo on 2023-05-03 is 70 degrees."\n\n# Ignores the tool\nmsg = aitools("What's your name?";\n    tools = get_weather, model = "claudeh")\n# I don't have a personal name, but you can call me your AI assistant!

How to have a multi-turn conversation with tools:

julia
conv = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather, return_all = true, model = "claudeh")\n\ntool_msg = conv[end].tool_calls[1] # there can be multiple tool calls requested!!\n\n# Execute the output to the tool message content\ntool_msg.content = PT.execute_tool(get_weather, tool_msg.args)\n\n# Add the tool message to the conversation\npush!(conv, tool_msg)\n\n# Call LLM again with the updated conversation\nconv = aitools(\n    "And in New York?"; tools = get_weather, return_all = true, conversation = conv, model = "claudeh")\n# 6-element Vector{AbstractMessage}:\n# SystemMessage("Act as a helpful AI assistant")\n# UserMessage("What's the weather in Tokyo on May 3rd, 2023?")\n# AIToolRequest("-"; Tool Requests: 1)\n# ToolMessage("The weather in Tokyo on 2023-05-03 is 70 degrees.")\n# UserMessage("And in New York?")\n# AIToolRequest("-"; Tool Requests: 1)

source


# PromptingTools.aitoolsMethod.
julia
aitools(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;\n    tools::Union{Type, Function, Method, AbstractTool, Vector} = Tool[],\n    verbose::Bool = true,\n    api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = (;\n        tool_choice = nothing),\n    strict::Union{Nothing, Bool} = nothing,\n    json_mode::Union{Nothing, Bool} = nothing,\n    name_user::Union{Nothing, String} = nothing,\n    name_assistant::Union{Nothing, String} = nothing,\n    kwargs...)

Calls chat completion API with an optional tool call signature. It can receive both tools and standard string-based content. Ideal for agentic workflows with more complex cognitive architectures.

Difference to aigenerate: Response can be a tool call (structured)

Differences to aiextract: Can provide infinitely many tools (including Functions!) and then respond with the tool call's output.

Arguments

Example

julia
## Let's define a tool\nget_weather(location, date) = "The weather in $location on $date is 70 degrees."\n\n## JSON mode request\nmsg = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather,\n    json_mode = true)\nPT.execute_tool(get_weather, msg.tool_calls[1].args)\n# "The weather in Tokyo on 2023-05-03 is 70 degrees."\n\n# Function calling request\nmsg = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather)\nPT.execute_tool(get_weather, msg.tool_calls[1].args)\n# "The weather in Tokyo on 2023-05-03 is 70 degrees."\n\n# Ignores the tool\nmsg = aitools("What's your name?";\n    tools = get_weather)\n# I don't have a personal name, but you can call me your AI assistant!

How to have a multi-turn conversation with tools:

julia
conv = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather, return_all = true)\n\ntool_msg = conv[end].tool_calls[1] # there can be multiple tool calls requested!!\n\n# Execute the output to the tool message content\ntool_msg.content = PT.execute_tool(get_weather, tool_msg.args)\n\n# Add the tool message to the conversation\npush!(conv, tool_msg)\n\n# Call LLM again with the updated conversation\nconv = aitools(\n    "And in New York?"; tools = get_weather, return_all = true, conversation = conv)\n# 6-element Vector{AbstractMessage}:\n# SystemMessage("Act as a helpful AI assistant")\n# UserMessage("What's the weather in Tokyo on May 3rd, 2023?")\n# AIToolRequest("-"; Tool Requests: 1)\n# ToolMessage("The weather in Tokyo on 2023-05-03 is 70 degrees.")\n# UserMessage("And in New York?")\n# AIToolRequest("-"; Tool Requests: 1)

source


# PromptingTools.aitoolsMethod.
julia
aitools(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aitools call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

source


# PromptingTools.align_tracer!Method.

Aligns multiple tracers in the vector to have the same Parent and Thread IDs as the first item.
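
For example, a minimal sketch (assuming the conversation was produced with a tracer schema, so it contains tracer messages):

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())\nconv = aigenerate(wrap_schema, "Say hi!"; return_all = true)\nPT.align_tracer!(conv)  # all tracer messages now share the first message's parent_id and thread_id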

source


# PromptingTools.align_tracer!Method.

Aligns the tracer message, updating the parent_id, thread_id. Often used to align multiple tracers in the vector to have the same IDs.

source


# PromptingTools.anthropic_apiFunction.
julia
anthropic_api(\n    prompt_schema::AbstractAnthropicSchema,\n    messages::Vector{<:AbstractDict{String, <:Any}} = Vector{Dict{String, Any}}();\n    api_key::AbstractString = ANTHROPIC_API_KEY,\n    system::Union{Nothing, AbstractString, AbstractVector{<:AbstractDict}} = nothing,\n    endpoint::String = "messages",\n    max_tokens::Int = 2048,\n    model::String = "claude-3-haiku-20240307", http_kwargs::NamedTuple = NamedTuple(),\n    stream::Bool = false,\n    url::String = "https://api.anthropic.com/v1",\n    cache::Union{Nothing, Symbol} = nothing,\n    kwargs...)

Simple wrapper for a call to Anthropic API.

Keyword Arguments

source


# PromptingTools.anthropic_extra_headersMethod.
julia
anthropic_extra_headers

Adds API version and beta headers to the request.

Kwargs / Beta headers

source


# PromptingTools.auth_headerMethod.
julia
auth_header(api_key::Union{Nothing, AbstractString};\n    bearer::Bool = true,\n    x_api_key::Bool = false,\n    extra_headers::AbstractVector = Vector{\n        Pair{String, String},\n    }[],\n    kwargs...)

Creates the authentication headers for any API request. Assumes that the communication is done in JSON format.

Arguments
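
A minimal illustration (the key value is a placeholder; the exact headers returned depend on the keyword arguments):

julia
headers = PT.auth_header("<my-api-key>")\n# a vector of "header" => "value" pairs, eg, a Bearer Authorization header plus JSON content/accept headers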

source


# PromptingTools.build_response_bodyMethod.
julia
build_response_body(\n    flavor::AnthropicStream, cb::AbstractStreamCallback; verbose::Bool = false, kwargs...)

Build the response body from the chunks to mimic receiving a standard response from the API.

Note: Limited functionality for now. Does NOT support tool use. Use standard responses for these.

source


# PromptingTools.build_response_bodyMethod.
julia
build_response_body(flavor::OpenAIStream, cb::AbstractStreamCallback; verbose::Bool = false, kwargs...)

Build the response body from the chunks to mimic receiving a standard response from the API.

Note: Limited functionality for now. Does NOT support tool use, refusals, logprobs. Use standard responses for these.

source


# PromptingTools.build_template_metadataFunction.
julia
build_template_metadata(\n    template::AbstractVector{<:AbstractMessage}, template_name::Symbol,\n    metadata_msgs::AbstractVector{<:MetadataMessage} = MetadataMessage[]; max_length::Int = 100)

Builds AITemplateMetadata for a given template based on the messages in template and other information.

AITemplateMetadata is a helper struct for easy searching and reviewing of templates via aitemplates().

Note: Assumes that there is only ever one UserMessage and SystemMessage (concatenates them together)

source


# PromptingTools.call_costMethod.
julia
call_cost(prompt_tokens::Int, completion_tokens::Int, model::String;\n    cost_of_token_prompt::Number = get(MODEL_REGISTRY,\n        model,\n        (; cost_of_token_prompt = 0.0)).cost_of_token_prompt,\n    cost_of_token_generation::Number = get(MODEL_REGISTRY, model,\n        (; cost_of_token_generation = 0.0)).cost_of_token_generation)\n\ncall_cost(msg, model::String)

Calculate the cost of a call based on the number of tokens in the message and the cost per token.

Arguments

Returns

Examples

julia
# Assuming MODEL_REGISTRY is set up with appropriate costs\nMODEL_REGISTRY = Dict(\n    "model1" => (cost_of_token_prompt = 0.05, cost_of_token_generation = 0.10),\n    "model2" => (cost_of_token_prompt = 0.07, cost_of_token_generation = 0.02)\n)\n\ncost1 = call_cost(10, 20, "model1")\n\n# from message\nmsg1 = AIMessage(;tokens=[10, 20])  # 10 prompt tokens, 20 generation tokens\ncost1 = call_cost(msg1, "model1")\n# cost1 = 10 * 0.05 + 20 * 0.10 = 2.5\n\n# Using custom token costs\ncost2 = call_cost(10, 20, "model3"; cost_of_token_prompt = 0.08, cost_of_token_generation = 0.12)\n# cost2 = 10 * 0.08 + 20 * 0.12 = 3.2

source


# PromptingTools.call_cost_alternativeMethod.

call_cost_alternative()

Alternative cost calculation. Used to calculate cost of image generation with DALL-E 3 and similar.

source


# PromptingTools.callbackMethod.
julia
callback(cb::AbstractStreamCallback, chunk::StreamChunk; kwargs...)

Process the chunk to be printed and print it. It's a wrapper for two operations:

source


# PromptingTools.configure_callback!Method.
julia
configure_callback!(cb::StreamCallback, schema::AbstractPromptSchema;\n    api_kwargs...)

Configures the callback cb for streaming with a given prompt schema.

If no cb.flavor is provided, adjusts the flavor and the provided api_kwargs as necessary. Eg, for most schemas, we add kwargs like stream = true to the api_kwargs.

If cb.flavor is provided, both callback and api_kwargs are left unchanged! You need to configure them yourself!
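
A rough sketch of preparing a streaming callback by hand (normally this is done for you when you pass a streaming callback to the ai* functions):

julia
cb = PT.StreamCallback(; out = stdout)\ncb, api_kwargs = PT.configure_callback!(cb, PT.OpenAISchema())\n# api_kwargs now carries the streaming-related settings (eg, stream = true) to forward to the API call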

source


', 73)), createBaseVNode("div", _hoisted_3, [ _cache[16] || (_cache[16] = createStaticVNode('# PromptingTools.create_templateMethod.
julia
create_template(; user::AbstractString, system::AbstractString="Act as a helpful AI assistant.", \n    load_as::Union{Nothing, Symbol, AbstractString} = nothing)\n\ncreate_template(system::AbstractString, user::AbstractString, \n    load_as::Union{Nothing, Symbol, AbstractString} = nothing)

Creates a simple template with a user and system message. Convenience function to prevent writing [PT.UserMessage(...), ...]

Arguments

', 10)), createBaseVNode("p", null, [ @@ -56,9 +56,9 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { _cache[14] || (_cache[14] = createBaseVNode("code", null, "kwargs", -1)), _cache[15] || (_cache[15] = createTextVNode(" during the AI call (see example).")) ]), - _cache[17] || (_cache[17] = createStaticVNode('

Returns a vector of SystemMessage and UserMessage objects. If load_as is provided, it registers the template in the TEMPLATE_STORE and TEMPLATE_METADATA as well.

Examples

Let's generate a quick template for a simple conversation (only one placeholder: name)

julia
# first system message, then user message (or use kwargs)\ntpl=PT.create_template("You must speak like a pirate", "Say hi to {{name}}")\n\n## 2-element Vector{PromptingTools.AbstractChatMessage}:\n## PromptingTools.SystemMessage("You must speak like a pirate")\n##  PromptingTools.UserMessage("Say hi to {{name}}")

You can immediately use this template in ai* functions:

julia
aigenerate(tpl; name="Jack Sparrow")\n# Output: AIMessage("Arr, me hearty! Best be sending me regards to Captain Jack Sparrow on the salty seas! May his compass always point true to the nearest treasure trove. Yarrr!")

If you're interested in saving the template in the template registry, jump to the end of these examples!

If you want to save it in your project folder:

julia
PT.save_template("templates/GreatingPirate.json", tpl; version="1.0") # optionally, add description

It will be saved and accessed under its basename, ie, GreatingPirate.

Now you can load it like all the other templates (provide the template directory):

julia
PT.load_templates!("templates") # it will remember the folder after the first run\n# Note: If you save it again, overwrite it, etc., you need to explicitly reload all templates again!

You can verify that your template is loaded with a quick search for "pirate":

julia
aitemplates("pirate")\n\n## 1-element Vector{AITemplateMetadata}:\n## PromptingTools.AITemplateMetadata\n##   name: Symbol GreatingPirate\n##   description: String ""\n##   version: String "1.0"\n##   wordcount: Int64 46\n##   variables: Array{Symbol}((1,))\n##   system_preview: String "You must speak like a pirate"\n##   user_preview: String "Say hi to {{name}}"\n##   source: String ""

Now you can use it like any other template (notice it's a symbol, so :GreatingPirate):

julia
aigenerate(:GreatingPirate; name="Jack Sparrow")\n# Output: AIMessage("Arr, me hearty! Best be sending me regards to Captain Jack Sparrow on the salty seas! May his compass always point true to the nearest treasure trove. Yarrr!")

If you do not need to save this template as a file, but you want to make it accessible in the template store for all ai* functions, you can use the load_as (= template name) keyword argument:

julia
# this will not only create the template, but also register it for immediate use\ntpl=PT.create_template("You must speak like a pirate", "Say hi to {{name}}"; load_as="GreatingPirate")\n\n# you can now use it like any other template\naigenerate(:GreatingPirate; name="Jack Sparrow")

source

', 19)) + _cache[17] || (_cache[17] = createStaticVNode('

Returns a vector of SystemMessage and UserMessage objects. If load_as is provided, it registers the template in the TEMPLATE_STORE and TEMPLATE_METADATA as well.

Examples

Let's generate a quick template for a simple conversation (only one placeholder: name)

julia
# first system message, then user message (or use kwargs)\ntpl=PT.create_template("You must speak like a pirate", "Say hi to {{name}}")\n\n## 2-element Vector{PromptingTools.AbstractChatMessage}:\n## PromptingTools.SystemMessage("You must speak like a pirate")\n##  PromptingTools.UserMessage("Say hi to {{name}}")

You can immediately use this template in ai* functions:

julia
aigenerate(tpl; name="Jack Sparrow")\n# Output: AIMessage("Arr, me hearty! Best be sending me regards to Captain Jack Sparrow on the salty seas! May his compass always point true to the nearest treasure trove. Yarrr!")

If you're interested in saving the template in the template registry, jump to the end of these examples!

If you want to save it in your project folder:

julia
PT.save_template("templates/GreatingPirate.json", tpl; version="1.0") # optionally, add description

It will be saved and accessed under its basename, ie, GreatingPirate.

Now you can load it like all the other templates (provide the template directory):

julia
PT.load_templates!("templates") # it will remember the folder after the first run\n# Note: If you save it again, overwrite it, etc., you need to explicitly reload all templates again!

You can verify that your template is loaded with a quick search for "pirate":

julia
aitemplates("pirate")\n\n## 1-element Vector{AITemplateMetadata}:\n## PromptingTools.AITemplateMetadata\n##   name: Symbol GreatingPirate\n##   description: String ""\n##   version: String "1.0"\n##   wordcount: Int64 46\n##   variables: Array{Symbol}((1,))\n##   system_preview: String "You must speak like a pirate"\n##   user_preview: String "Say hi to {{name}}"\n##   source: String ""

Now you can use it like any other template (notice it's a symbol, so :GreatingPirate):

julia
aigenerate(:GreatingPirate; name="Jack Sparrow")\n# Output: AIMessage("Arr, me hearty! Best be sending me regards to Captain Jack Sparrow on the salty seas! May his compass always point true to the nearest treasure trove. Yarrr!")

If you do not need to save this template as a file, but you want to make it accessible in the template store for all ai* functions, you can use the load_as (= template name) keyword argument:

julia
# this will not only create the template, but also register it for immediate use\ntpl=PT.create_template("You must speak like a pirate", "Say hi to {{name}}"; load_as="GreatingPirate")\n\n# you can now use it like any other template\naigenerate(:GreatingPirate; name="Jack Sparrow")

source

', 19)) ]), - _cache[50] || (_cache[50] = createStaticVNode('
# PromptingTools.decode_choicesMethod.
julia
decode_choices(schema::OpenAISchema,\n    choices::AbstractVector{<:AbstractString},\n    msg::AIMessage; model::AbstractString,\n    token_ids_map::Union{Nothing, Dict{<:AbstractString, <:Integer}} = nothing,\n    kwargs...)

Decodes the underlying AIMessage against the original choices to lookup what the category name was.

If it fails, it will return msg.content == nothing

source


# PromptingTools.detect_base_main_overridesMethod.
julia
detect_base_main_overrides(code_block::AbstractString)

Detects if a given code block overrides any Base or Main methods.

Returns a tuple of a boolean and a vector of the overridden methods.

source


# PromptingTools.distance_longest_common_subsequenceMethod.
julia
distance_longest_common_subsequence(\n    input1::AbstractString, input2::AbstractString)\n\ndistance_longest_common_subsequence(\n    input1::AbstractString, input2::AbstractVector{<:AbstractString})

Measures distance between two strings using the length of the longest common subsequence (ie, the lower the number, the better the match). Perfect match is distance = 0.0

Convenience wrapper around length_longest_common_subsequence to normalize the distances to the 0-1 range. There is also a dispatch for comparing a string vs an array of strings.

Notes

Arguments

Example

You can also use it to find the closest context for some AI generated summary/story:

julia
context = ["The enigmatic stranger vanished as swiftly as a wisp of smoke, leaving behind a trail of unanswered questions.",\n    "Beneath the shimmering moonlight, the ocean whispered secrets only the stars could hear.",\n    "The ancient tree stood as a silent guardian, its gnarled branches reaching for the heavens.",\n    "The melody danced through the air, painting a vibrant tapestry of emotions.",\n    "Time flowed like a relentless river, carrying away memories and leaving imprints in its wake."]\n\nstory = """\n    Beneath the shimmering moonlight, the ocean whispered secrets only the stars could hear.\n\n    Under the celestial tapestry, the vast ocean whispered its secrets to the indifferent stars. Each ripple, a murmured confidence, each wave, a whispered lament. The glittering celestial bodies listened in silent complicity, their enigmatic gaze reflecting the ocean's unspoken truths. The cosmic dance between the sea and the sky, a symphony of shared secrets, forever echoing in the ethereal expanse.\n    """\n\ndist = distance_longest_common_subsequence(story, context)\n@info "The closest context to the query: "$(first(story,20))..." is: "$(context[argmin(dist)])" (distance: $(minimum(dist)))"

source


# PromptingTools.encode_choicesMethod.
julia
encode_choices(schema::OpenAISchema, choices::AbstractVector{<:AbstractString};\n    model::AbstractString,\n    token_ids_map::Union{Nothing, Dict{<:AbstractString, <:Integer}} = nothing,\n    kwargs...)\n\nencode_choices(schema::OpenAISchema, choices::AbstractVector{T};\n    model::AbstractString,\n    token_ids_map::Union{Nothing, Dict{<:AbstractString, <:Integer}} = nothing,\n    kwargs...) where {T <: Tuple{<:AbstractString, <:AbstractString}}

Encode the choices into an enumerated list that can be interpolated into the prompt and creates the corresponding logit biases (to choose only from the selected tokens).

Optionally, can be a vector tuples, where the first element is the choice and the second is the description.

There can be at most 40 choices provided.

Arguments

Returns

Examples

julia
choices_prompt, logit_bias, _ = PT.encode_choices(PT.OpenAISchema(), ["true", "false"])\nchoices_prompt # Output: "true for "true"\nfalse for "false"\nlogit_bias # Output: Dict(837 => 100, 905 => 100)\n\nchoices_prompt, logit_bias, _ = PT.encode_choices(PT.OpenAISchema(), ["animal", "plant"])\nchoices_prompt # Output: "1. "animal"\n2. "plant""\nlogit_bias # Output: Dict(16 => 100, 17 => 100)

Or choices with descriptions:

julia
choices_prompt, logit_bias, _ = PT.encode_choices(PT.OpenAISchema(), [("A", "any animal or creature"), ("P", "for any plant or tree"), ("O", "for everything else")])\nchoices_prompt # Output: "1. "A" for any animal or creature\n2. "P" for any plant or tree\n3. "O" for everything else"\nlogit_bias # Output: Dict(16 => 100, 17 => 100, 18 => 100)

source


# PromptingTools.eval!Method.
julia
eval!(cb::AbstractCodeBlock;\n    safe_eval::Bool = true,\n    capture_stdout::Bool = true,\n    prefix::AbstractString = "",\n    suffix::AbstractString = "")

Evaluates a code block cb in-place. It runs automatically when AICode is instantiated with a String.

Check the outcome of the evaluation with Base.isvalid(cb). If ==true, the provided code block has executed successfully.

Steps:

Keyword Arguments
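
A minimal sketch of the typical flow (eval! runs automatically when AICode is constructed from a string):

julia
cb = PT.AICode("mysum = 1 + 1")\nisvalid(cb)  # true -> the provided code block executed successfully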

source


# PromptingTools.execute_toolMethod.
julia
execute_tool(f::Function, args::AbstractDict)

Executes a function with the provided arguments.

The dictionary is unordered, so we need to sort the arguments first and then pass them to the function.

source


# PromptingTools.extract_chunksMethod.
julia
extract_chunks(flavor::AbstractStreamFlavor, blob::AbstractString;\n    spillover::AbstractString = "", verbose::Bool = false, kwargs...)

Extract the chunks from the received SSE blob. Shared by all streaming flavors currently.

Returns a list of StreamChunk and the next spillover (if message was incomplete).

source


# PromptingTools.extract_code_blocksMethod.
julia
extract_code_blocks(markdown_content::String) -> Vector{String}

Extract Julia code blocks from a markdown string.

This function searches through the provided markdown content, identifies blocks of code specifically marked as Julia code (using the julia ... code fence patterns), and extracts the code within these blocks. The extracted code blocks are returned as a vector of strings, with each string representing one block of Julia code.

Note: Only the content within the code fences is extracted, and the code fences themselves are not included in the output.

See also: extract_code_blocks_fallback

Arguments

Returns

Examples

Example with a single Julia code block

julia
markdown_single = """

julia println("Hello, World!")

"""\nextract_code_blocks(markdown_single)\n# Output: ["Hello, World!"]
julia
# Example with multiple Julia code blocks\nmarkdown_multiple = """

julia x = 5

Some text in between

julia y = x + 2

"""\nextract_code_blocks(markdown_multiple)\n# Output: ["x = 5", "y = x + 2"]

source


# PromptingTools.extract_code_blocks_fallbackMethod.
julia
extract_code_blocks_fallback(markdown_content::String, delim::AbstractString="\\n```\\n")

Extract Julia code blocks from a markdown string using a fallback method (splitting by arbitrary delimiters). It is much simpler than extract_code_blocks and does not support nested code blocks.

It is often used as a fallback for smaller LLMs that forget to code fence julia ....

Example

julia
code = """

println("hello")

\nSome text

println("world")

"""\n\n# We extract text between triple backticks and check each blob if it looks like a valid Julia code\ncode_parsed = extract_code_blocks_fallback(code) |> x -> filter(is_julia_code, x) |> x -> join(x, "\n")

source


# PromptingTools.extract_contentMethod.
julia
extract_content(flavor::AnthropicStream, chunk)

Extract the content from the chunk.

source


# PromptingTools.extract_contentMethod.
julia
extract_content(flavor::OpenAIStream, chunk::StreamChunk; kwargs...)

Extract the content from the chunk.

source


# PromptingTools.extract_docstringMethod.

Extract the docstring from a type or function.

source


# PromptingTools.extract_function_nameMethod.
julia
extract_function_name(code_block::String) -> Union{String, Nothing}

Extract the name of a function from a given Julia code block. The function searches for two patterns:

If a function name is found, it is returned as a string. If no function name is found, the function returns nothing.

To capture all function names in the block, use extract_function_names.

Arguments

Returns

Example

julia
code = """\nfunction myFunction(arg1, arg2)\n    # Function body\nend\n"""\nextract_function_name(code)\n# Output: "myFunction"

source


# PromptingTools.extract_function_namesMethod.
julia
extract_function_names(code_block::AbstractString)

Extract one or more names of functions defined in a given Julia code block. The function searches for two patterns: - The explicit function declaration pattern: function name(...) ... end - The concise function declaration pattern: name(...) = ...

It always returns a vector of strings, even if only one function name is found; if no function name is found, the vector is empty.

For only one function name match, use extract_function_name.

source


# PromptingTools.extract_julia_importsMethod.
julia
extract_julia_imports(input::AbstractString; base_or_main::Bool = false)

Detects any using or import statements in a given string and returns the package names as a vector of symbols.

base_or_main is a boolean that determines whether to isolate only Base and Main OR whether to exclude them in the returned vector.
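
A quick sketch of the expected behavior:

julia
PT.extract_julia_imports("using Plots, DataFrames")\n# -> a vector of package names as symbols, eg, [:Plots, :DataFrames]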

source


# PromptingTools.finalize_outputsMethod.
julia
finalize_outputs(prompt::ALLOWED_PROMPT_TYPE, conv_rendered::Any,\n    msg::Union{Nothing, AbstractMessage, AbstractVector{<:AbstractMessage}};\n    return_all::Bool = false,\n    dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    kwargs...)

Finalizes the outputs of the ai* functions by either returning the conversation history or the last message.

Keyword arguments

source


# PromptingTools.finalize_tracerMethod.
julia
finalize_tracer(\n    tracer_schema::AbstractTracerSchema, tracer, msg_or_conv::Union{\n        AbstractMessage, AbstractVector{<:AbstractMessage}};\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Finalizes the call tracer with whatever is needed after the ai* calls. Use tracer_kwargs to provide any information necessary (eg, parent_id, thread_id, run_id).

In the default implementation, we convert all non-tracer messages into TracerMessage.

See also: meta, unwrap, SaverSchema, initialize_tracer

source


# PromptingTools.finalize_tracerMethod.
julia
finalize_tracer(\n    tracer_schema::SaverSchema, tracer, msg_or_conv::Union{\n        AbstractMessage, AbstractVector{<:AbstractMessage}};\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Finalizes the calltracer by saving the provided conversation msg_or_conv to the disk.

Default path is LOG_DIR/conversation__<first_msg_hash>__<time_received_str>.json, where LOG_DIR is set by user preferences or ENV variable (defaults to log/ in current working directory).

If you want to change the logging directory or the exact file name to log with, you can provide the following arguments to tracer_kwargs:

It can be composed with TracerSchema to also attach necessary metadata (see below).

Example

julia
wrap_schema = PT.SaverSchema(PT.TracerSchema(PT.OpenAISchema()))\nconv = aigenerate(wrap_schema, :BlankSystemUser; system="You're a French-speaking assistant!",\n    user="Say hi!", model="gpt-4", api_kwargs=(;temperature=0.1), return_all=true)\n\n# conv is a vector of messages that will be saved to a JSON together with metadata about the template and api_kwargs

See also: meta, unwrap, TracerSchema, initialize_tracer

source


# PromptingTools.find_subsequence_positionsMethod.
julia
find_subsequence_positions(subseq, seq) -> Vector{Int}

Find all positions of a subsequence subseq within a larger sequence seq. Used to lookup positions of code blocks in markdown.

This function scans the sequence seq and identifies all starting positions where the subsequence subseq is found. Both subseq and seq should be vectors of integers, typically obtained using codeunits on strings.

Arguments

Returns

Examples

julia
find_subsequence_positions(codeunits("ab"), codeunits("cababcab")) # Returns [2, 4, 7]

source


# PromptingTools.generate_structMethod.
julia
generate_struct(fields::Vector)

Generate a struct with the given name and fields. Fields can be specified simply as symbols (with default type String) or pairs of symbol and type. Field descriptions can be provided by adding a pair with the field name suffixed with "__description" (eg, :myfield__description => "My field description").

Returns: A tuple of (struct type, descriptions)

Examples

julia
Weather, descriptions = generate_struct(\n    [:location,\n     :temperature=>Float64,\n     :temperature__description=>"Temperature in degrees Fahrenheit",\n     :condition=>String,\n     :condition__description=>"Current weather condition (e.g., sunny, rainy, cloudy)"\n    ])

source


# PromptingTools.get_arg_namesMethod.

Get the argument names from a function, ignores keyword arguments!!

source


# PromptingTools.get_arg_namesMethod.

Get the argument names from a method, ignores keyword arguments!!

source


# PromptingTools.get_arg_typesMethod.

Get the argument types from a function, ignores keyword arguments!!

source


# PromptingTools.get_arg_typesMethod.

Get the argument types from a method, ignores keyword arguments!!

source


# PromptingTools.get_preferencesMethod.
julia
get_preferences(key::String)

Get preferences for PromptingTools. See ?PREFERENCES for more information.

See also: set_preferences!

Example

julia
PromptingTools.get_preferences("MODEL_CHAT")

source


# PromptingTools.ggi_generate_contentFunction.

Stub - to be extended in extension: GoogleGenAIPromptingToolsExt. ggi stands for GoogleGenAI

source


# PromptingTools.handle_error_messageMethod.
julia
handle_error_message(chunk::StreamChunk; throw_on_error::Bool = false, kwargs...)

Handles error messages from the streaming response.

source


# PromptingTools.has_julia_promptMethod.

Checks if a given string has a Julia prompt (julia>) at the beginning of a line.

source


# PromptingTools.initialize_tracerMethod.
julia
initialize_tracer(\n    tracer_schema::AbstractTracerSchema; model = "", tracer_kwargs = NamedTuple(),\n    prompt::ALLOWED_PROMPT_TYPE = "", kwargs...)

Initializes tracer/callback (if necessary). Can provide any keyword arguments in tracer_kwargs (eg, parent_id, thread_id, run_id). Is executed prior to the ai* calls.

By default it captures:

In the default implementation, we just collect the necessary data to build the tracer object in finalize_tracer.

See also: meta, unwrap, TracerSchema, SaverSchema, finalize_tracer

source


# PromptingTools.is_doneMethod.
julia
is_done(flavor, chunk)

Check if the streaming is done. Shared by all streaming flavors currently.

source


# PromptingTools.isextractedMethod.

Check if the object is an instance of AbstractExtractedData

source


# PromptingTools.last_messageMethod.

Helpful accessor for the last message in conversation. Returns the last message in the conversation.

source


# PromptingTools.last_outputMethod.

Helpful accessor for the last generated output (msg.content) in conversation. Returns the last output in the conversation (eg, the string/data in the last message).
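
A tiny sketch showing both accessors together:

julia
conv = aigenerate("Say hi!"; return_all = true)\nPT.last_message(conv)  # the final AIMessage in the conversation\nPT.last_output(conv)   # its content, ie, the generated string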

source


# PromptingTools.length_longest_common_subsequenceMethod.
julia
length_longest_common_subsequence(itr1::AbstractString, itr2::AbstractString)

Compute the length of the longest common subsequence between two string sequences (ie, the higher the number, the better the match).

Source: https://cn.julialang.org/LeetCode.jl/dev/democards/problems/problems/1143.longest-common-subsequence/

Arguments

Returns

The length of the longest common subsequence.

Examples

julia
text1 = "abc-abc----"\ntext2 = "___ab_c__abc"\nlength_longest_common_subsequence(text1, text2)\n# Output: 6 (-> "abcabc")

It can be used to fuzzy match strings and find the similarity between them (Tip: normalize the match)

julia
commands = ["product recommendation", "emotions", "specific product advice", "checkout advice"]\nquery = "Which product can you recommend for me?"\nlet pos = argmax(length_longest_common_subsequence.(Ref(query), commands))\n    dist = length_longest_common_subsequence(query, commands[pos])\n    norm = dist / min(length(query), length(commands[pos]))\n    @info "The closest command to the query: "$(query)" is: "$(commands[pos])" (distance: $(dist), normalized: $(norm))"\nend

But it might be easier to use directly the convenience wrapper distance_longest_common_subsequence!

source


# PromptingTools.list_aliasesMethod.

Shows the Dictionary of model aliases in the registry. Add more with MODEL_ALIASES[alias] = model_name.

source


# PromptingTools.list_registryMethod.

Shows the list of models in the registry. Add more with register_model!.

source


# PromptingTools.load_api_keys!Method.

Loads API keys from environment variables and preferences

source


# PromptingTools.load_conversationMethod.
julia
load_conversation(io_or_file::Union{IO, AbstractString})

Loads a conversation (messages) from io_or_file
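
For example, a round-trip sketch (the file path is hypothetical):

julia
conv = aigenerate("Say hi!"; return_all = true)\nPT.save_conversation("conversation.json", conv)\nconv2 = PT.load_conversation("conversation.json")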

source


# PromptingTools.load_templateMethod.
julia
load_template(io_or_file::Union{IO, AbstractString})

Loads messaging template from io_or_file and returns tuple of template messages and metadata.

source


# PromptingTools.load_templates!Function.
julia
load_templates!(dir_templates::Union{String, Nothing} = nothing;\n    remember_path::Bool = true,\n    remove_templates::Bool = isnothing(dir_templates),\n    store::Dict{Symbol, <:Any} = TEMPLATE_STORE,\n    metadata_store::Vector{<:AITemplateMetadata} = TEMPLATE_METADATA)

Loads templates from folder templates/ in the package root and stores them in TEMPLATE_STORE and TEMPLATE_METADATA.

Note: Automatically removes any existing templates and metadata from TEMPLATE_STORE and TEMPLATE_METADATA if remove_templates=true.

Arguments

Example

Load the default templates:

julia
PT.load_templates!() # no path needed

Load templates from a new custom path:

julia
PT.load_templates!("path/to/templates") # we will remember this path for future refresh

If you want to now refresh the default templates and the new path, just call load_templates!() without any arguments.

source


# PromptingTools.metaMethod.

Extracts the metadata dictionary from the tracer message or tracer-like object.

source


# PromptingTools.ollama_apiFunction.
julia
ollama_api(prompt_schema::Union{AbstractOllamaManagedSchema, AbstractOllamaSchema},\n    prompt::Union{AbstractString, Nothing} = nothing;\n    system::Union{Nothing, AbstractString} = nothing,\n    messages::Vector{<:AbstractMessage} = AbstractMessage[],\n    endpoint::String = "generate",\n    model::String = "llama2", http_kwargs::NamedTuple = NamedTuple(),\n    stream::Bool = false,\n    url::String = "localhost", port::Int = 11434,\n    kwargs...)

Simple wrapper for a call to Ollama API.

Keyword Arguments

source


# PromptingTools.parse_toolMethod.
julia
parse_tool(datatype::Type, blob::AbstractString)

Parse the JSON blob into the specified datatype in try-catch mode.

If parsing fails, it tries to return the untyped JSON blob in a dictionary.

source


# PromptingTools.pprintFunction.

Utility for pretty printing PromptingTools types in REPL.
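
For example (assuming conv is a vector of messages from an earlier call):

julia
conv = aigenerate("Say hi!"; return_all = true)\npprint(conv)  # pretty-prints the whole conversation in the REPL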

source


# PromptingTools.pprintMethod.
julia
pprint(io::IO, conversation::AbstractVector{<:AbstractMessage})

Pretty print a vector of AbstractMessage to the given IO stream.

source


# PromptingTools.pprintMethod.
julia
pprint(io::IO, msg::AbstractMessage; text_width::Int = displaysize(io)[2])

Pretty print a single AbstractMessage to the given IO stream.

text_width is the width of the text to be displayed. If not provided, it defaults to the width of the given IO stream, and newline separators are added as needed.

source


# PromptingTools.previewFunction.

Utility for rendering the conversation (vector of messages) as markdown. REQUIRES the Markdown package to load the extension! See also pprint

source


# PromptingTools.print_contentMethod.
julia
print_content(out::Channel, text::AbstractString; kwargs...)

Print the content to the provided Channel out.

source


# PromptingTools.print_contentMethod.
julia
print_content(out::IO, text::AbstractString; kwargs...)

Print the content to the IO output stream out.

source


# PromptingTools.print_contentMethod.
julia
print_content(out::Nothing, text::Any)

Do nothing if the output stream is nothing.

source


# PromptingTools.push_conversation!Method.
julia
push_conversation!(conv_history, conversation::AbstractVector, max_history::Union{Int, Nothing})

Add a new conversation to the conversation history and resize the history if necessary.

This function appends a conversation to the conv_history, which is a vector of conversations. Each conversation is represented as a vector of AbstractMessage objects. After adding the new conversation, the history is resized according to the max_history parameter to ensure that the size of the history does not exceed the specified limit.

Arguments

Returns

The updated conversation history.

Example

julia
new_conversation = aigenerate("Hello World"; return_all = true)\npush_conversation!(PT.CONV_HISTORY, new_conversation, 10)

This is done automatically by the ai"" macros.

source


# PromptingTools.recursive_splitterMethod.
julia
recursive_splitter(text::AbstractString, separators::Vector{String}; max_length::Int=35000) -> Vector{String}

Split a given string text into chunks recursively using a series of separators, with each chunk having a maximum length of max_length (if it's achievable given the separators provided). This function is useful for splitting large documents or texts into smaller segments that are more manageable for processing, particularly for models or systems with limited context windows.

It was previously known as split_by_length.

This is similar to Langchain's RecursiveCharacterTextSplitter. To achieve the same behavior, use separators=["\\n\\n", "\\n", " ", ""].

Arguments

Returns

Vector{String}: A vector of strings, where each string is a chunk of the original text that is smaller than or equal to max_length.

Usage Tips

How It Works

Examples

Splitting text using multiple separators:

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", ". ", "\\n"] # split by paragraphs, sentences, and newlines (not by words)\nchunks = recursive_splitter(text, separators, max_length=20)

Splitting text using multiple separators - with splitting on words:

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", ". ", "\\n", " "] # split by paragraphs, sentences, and newlines, words\nchunks = recursive_splitter(text, separators, max_length=10)

Using a single separator:

julia
text = "Hello,World," ^ 2900  # length 34900 characters\nchunks = recursive_splitter(text, [","], max_length=10000)

To achieve the same behavior as Langchain's RecursiveCharacterTextSplitter, use separators=["\\n\\n", "\\n", " ", ""].

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", "\\n", " ", ""]\nchunks = recursive_splitter(text, separators, max_length=10)

source


# PromptingTools.recursive_splitterMethod.
julia
recursive_splitter(text::String; separator::String=" ", max_length::Int=35000) -> Vector{String}

Split a given string text into chunks of a specified maximum length max_length. This is particularly useful for splitting larger documents or texts into smaller segments, suitable for models or systems with smaller context windows.

There is a method for dispatching on multiple separators, recursive_splitter(text::String, separators::Vector{String}; max_length::Int=35000) -> Vector{String} that mimics the logic of Langchain's RecursiveCharacterTextSplitter.

Arguments

Returns

Vector{String}: A vector of strings, each representing a chunk of the original text that is smaller than or equal to max_length.

Notes

Examples

Splitting text with the default separator (" "):

julia
text = "Hello world. How are you?"\nchunks = recursive_splitter(text; max_length=13)\nlength(chunks) # Output: 2

Using a custom separator and custom max_length

julia
text = "Hello,World," ^ 2900 # length 34900 chars\nrecursive_splitter(text; separator=",", max_length=10000) # for 4K context window\nlength(chunks[1]) # Output: 4

source


# PromptingTools.register_model!Function.
julia
register_model!(registry = MODEL_REGISTRY;\n    name::String,\n    schema::Union{AbstractPromptSchema, Nothing} = nothing,\n    cost_of_token_prompt::Float64 = 0.0,\n    cost_of_token_generation::Float64 = 0.0,\n    description::String = "")

Register a new AI model with name and its associated schema.

Registering a model helps with calculating the costs and automatically selecting the right prompt schema.

Arguments
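
Example

A sketch of registering a hypothetical OpenAI-compatible model with custom token costs:

julia
register_model!(;\n    name = "my-finetuned-model",  # hypothetical model name\n    schema = PT.OpenAISchema(),\n    cost_of_token_prompt = 1e-6,\n    cost_of_token_generation = 2e-6,\n    description = "My fine-tuned model (hypothetical)")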

source


# PromptingTools.remove_julia_promptMethod.
julia
remove_julia_prompt(s::T) where {T<:AbstractString}

If it detects a julia prompt, it removes it and all lines that do not have it (except for those that belong to the code block).
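
A quick sketch of the expected behavior:

julia
PT.remove_julia_prompt("julia> 1 + 1\n2")\n# -> "1 + 1" (the prompt is stripped and the plain output line is dropped)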

source


# PromptingTools.remove_templates!Method.
julia
    remove_templates!()

Removes all templates from TEMPLATE_STORE and TEMPLATE_METADATA.

source


# PromptingTools.remove_unsafe_linesMethod.

Iterates over the lines of a string and removes those that contain a package operation or a missing import.

source


# PromptingTools.renderMethod.

Renders provided messaging template (template) under the default schema (PROMPT_SCHEMA).

source


', 109)), + _cache[50] || (_cache[50] = createStaticVNode('
# PromptingTools.decode_choicesMethod.
julia
decode_choices(schema::OpenAISchema,\n    choices::AbstractVector{<:AbstractString},\n    msg::AIMessage; model::AbstractString,\n    token_ids_map::Union{Nothing, Dict{<:AbstractString, <:Integer}} = nothing,\n    kwargs...)

Decodes the underlying AIMessage against the original choices to lookup what the category name was.

If it fails, it will return msg.content == nothing

source


# PromptingTools.detect_base_main_overridesMethod.
julia
detect_base_main_overrides(code_block::AbstractString)

Detects if a given code block overrides any Base or Main methods.

Returns a tuple of a boolean and a vector of the overridden methods.

source


# PromptingTools.distance_longest_common_subsequenceMethod.
julia
distance_longest_common_subsequence(\n    input1::AbstractString, input2::AbstractString)\n\ndistance_longest_common_subsequence(\n    input1::AbstractString, input2::AbstractVector{<:AbstractString})

Measures distance between two strings using the length of the longest common subsequence (ie, the lower the number, the better the match). Perfect match is distance = 0.0

Convenience wrapper around length_longest_common_subsequence to normalize the distances to the 0-1 range. There is also a dispatch for comparing a string vs an array of strings.

Notes

Arguments

Example

You can also use it to find the closest context for some AI generated summary/story:

julia
context = ["The enigmatic stranger vanished as swiftly as a wisp of smoke, leaving behind a trail of unanswered questions.",\n    "Beneath the shimmering moonlight, the ocean whispered secrets only the stars could hear.",\n    "The ancient tree stood as a silent guardian, its gnarled branches reaching for the heavens.",\n    "The melody danced through the air, painting a vibrant tapestry of emotions.",\n    "Time flowed like a relentless river, carrying away memories and leaving imprints in its wake."]\n\nstory = """\n    Beneath the shimmering moonlight, the ocean whispered secrets only the stars could hear.\n\n    Under the celestial tapestry, the vast ocean whispered its secrets to the indifferent stars. Each ripple, a murmured confidence, each wave, a whispered lament. The glittering celestial bodies listened in silent complicity, their enigmatic gaze reflecting the ocean's unspoken truths. The cosmic dance between the sea and the sky, a symphony of shared secrets, forever echoing in the ethereal expanse.\n    """\n\ndist = distance_longest_common_subsequence(story, context)\n@info "The closest context to the query: "$(first(story,20))..." is: "$(context[argmin(dist)])" (distance: $(minimum(dist)))"

source


# PromptingTools.encode_choicesMethod.
julia
encode_choices(schema::OpenAISchema, choices::AbstractVector{<:AbstractString};\n    model::AbstractString,\n    token_ids_map::Union{Nothing, Dict{<:AbstractString, <:Integer}} = nothing,\n    kwargs...)\n\nencode_choices(schema::OpenAISchema, choices::AbstractVector{T};\n    model::AbstractString,\n    token_ids_map::Union{Nothing, Dict{<:AbstractString, <:Integer}} = nothing,\n    kwargs...) where {T <: Tuple{<:AbstractString, <:AbstractString}}

Encode the choices into an enumerated list that can be interpolated into the prompt and creates the corresponding logit biases (to choose only from the selected tokens).

Optionally, can be a vector tuples, where the first element is the choice and the second is the description.

There can be at most 40 choices provided.

Arguments

Returns

Examples

julia
choices_prompt, logit_bias, _ = PT.encode_choices(PT.OpenAISchema(), ["true", "false"])\nchoices_prompt # Output: "true for "true"\nfalse for "false"\nlogit_bias # Output: Dict(837 => 100, 905 => 100)\n\nchoices_prompt, logit_bias, _ = PT.encode_choices(PT.OpenAISchema(), ["animal", "plant"])\nchoices_prompt # Output: "1. "animal"\n2. "plant""\nlogit_bias # Output: Dict(16 => 100, 17 => 100)

Or choices with descriptions:

julia
choices_prompt, logit_bias, _ = PT.encode_choices(PT.OpenAISchema(), [("A", "any animal or creature"), ("P", "for any plant or tree"), ("O", "for everything else")])\nchoices_prompt # Output: "1. "A" for any animal or creature\n2. "P" for any plant or tree\n3. "O" for everything else"\nlogit_bias # Output: Dict(16 => 100, 17 => 100, 18 => 100)

source


# PromptingTools.eval!Method.
julia
eval!(cb::AbstractCodeBlock;\n    safe_eval::Bool = true,\n    capture_stdout::Bool = true,\n    prefix::AbstractString = "",\n    suffix::AbstractString = "")

Evaluates a code block cb in-place. It runs automatically when AICode is instantiated with a String.

Check the outcome of the evaluation with Base.isvalid(cb). If ==true, the provided code block has executed successfully.

Steps:

Keyword Arguments

source


# PromptingTools.execute_toolMethod.
julia
execute_tool(f::Function, args::AbstractDict)

Executes a function with the provided arguments.

The dictionary is unordered, so we need to sort the arguments first and then pass them to the function.

source


# PromptingTools.extract_chunksMethod.
julia
extract_chunks(flavor::AbstractStreamFlavor, blob::AbstractString;\n    spillover::AbstractString = "", verbose::Bool = false, kwargs...)

Extract the chunks from the received SSE blob. Shared by all streaming flavors currently.

Returns a list of StreamChunk and the next spillover (if message was incomplete).

source


# PromptingTools.extract_code_blocksMethod.
julia
extract_code_blocks(markdown_content::String) -> Vector{String}

Extract Julia code blocks from a markdown string.

This function searches through the provided markdown content, identifies blocks of code specifically marked as Julia code (using the julia ... code fence patterns), and extracts the code within these blocks. The extracted code blocks are returned as a vector of strings, with each string representing one block of Julia code.

Note: Only the content within the code fences is extracted, and the code fences themselves are not included in the output.

See also: extract_code_blocks_fallback

Arguments

Returns

Examples

Example with a single Julia code block

julia
markdown_single = """

julia println("Hello, World!")

"""\nextract_code_blocks(markdown_single)\n# Output: ["Hello, World!"]
julia
# Example with multiple Julia code blocks\nmarkdown_multiple = """

julia x = 5

Some text in between

julia y = x + 2

"""\nextract_code_blocks(markdown_multiple)\n# Output: ["x = 5", "y = x + 2"]

source


# PromptingTools.extract_code_blocks_fallbackMethod.
julia
extract_code_blocks_fallback(markdown_content::String, delim::AbstractString="\\n```\\n")

Extract Julia code blocks from a markdown string using a fallback method (splitting by arbitrary delimiters). It is much simpler than extract_code_blocks and does not support nested code blocks.

It is often used as a fallback for smaller LLMs that forget to code fence julia ....

Example

julia
code = """

println("hello")

\nSome text

println("world")

"""\n\n# We extract text between triple backticks and check each blob if it looks like a valid Julia code\ncode_parsed = extract_code_blocks_fallback(code) |> x -> filter(is_julia_code, x) |> x -> join(x, "\n")

source


# PromptingTools.extract_contentMethod.
julia
extract_content(flavor::AnthropicStream, chunk)

Extract the content from the chunk.

source


# PromptingTools.extract_contentMethod.
julia
extract_content(flavor::OpenAIStream, chunk::StreamChunk; kwargs...)

Extract the content from the chunk.

source


# PromptingTools.extract_docstringMethod.

Extract the docstring from a type or function.

source


# PromptingTools.extract_function_nameMethod.
julia
extract_function_name(code_block::String) -> Union{String, Nothing}

Extract the name of a function from a given Julia code block. The function searches for two patterns:

If a function name is found, it is returned as a string. If no function name is found, the function returns nothing.

To capture all function names in the block, use extract_function_names.

Arguments

Returns

Example

julia
code = """\nfunction myFunction(arg1, arg2)\n    # Function body\nend\n"""\nextract_function_name(code)\n# Output: "myFunction"

source


# PromptingTools.extract_function_namesMethod.
julia
extract_function_names(code_block::AbstractString)

Extract one or more names of functions defined in a given Julia code block. The function searches for two patterns:

- The explicit function declaration pattern: function name(...) ... end
- The concise function declaration pattern: name(...) = ...

It always returns a vector of strings, even if only one function name is found (the vector is empty if no function names are found).

For only one function name match, use extract_function_name.
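
For example, on a block defining one explicit and one concise function (the exact output is inferred from the description above, not verified):

julia
code = """
function add(a, b)
    return a + b
end
double(x) = 2x
"""
extract_function_names(code)
# Expected (based on the description): ["add", "double"]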

source


# PromptingTools.extract_julia_importsMethod.
julia
extract_julia_imports(input::AbstractString; base_or_main::Bool = false)

Detects any using or import statements in a given string and returns the package names as a vector of symbols.

base_or_main is a boolean flag: if true, only the imports of Base and Main are returned; if false, they are excluded from the returned vector.
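
A small usage sketch (the returned symbols are assumed from the description above):

julia
extract_julia_imports("using Plots, DataFrames\nimport JSON3")
# Assumed output: [:Plots, :DataFrames, :JSON3]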

source


# PromptingTools.finalize_outputsMethod.
julia
finalize_outputs(prompt::ALLOWED_PROMPT_TYPE, conv_rendered::Any,
    msg::Union{Nothing, AbstractMessage, AbstractVector{<:AbstractMessage}};
    return_all::Bool = false,
    dry_run::Bool = false,
    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],
    no_system_message::Bool = false,
    kwargs...)

Finalizes the outputs of the ai* functions by either returning the conversation history or the last message.

Keyword arguments

source


# PromptingTools.finalize_tracerMethod.
julia
finalize_tracer(
    tracer_schema::AbstractTracerSchema, tracer, msg_or_conv::Union{
        AbstractMessage, AbstractVector{<:AbstractMessage}};
    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Finalizes the call tracer with whatever is needed after the ai* calls. Use tracer_kwargs to provide any information necessary (eg, parent_id, thread_id, run_id).

In the default implementation, we convert all non-tracer messages into TracerMessage.

See also: meta, unwrap, SaverSchema, initialize_tracer

source


# PromptingTools.finalize_tracerMethod.
julia
finalize_tracer(
    tracer_schema::SaverSchema, tracer, msg_or_conv::Union{
        AbstractMessage, AbstractVector{<:AbstractMessage}};
    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Finalizes the call tracer by saving the provided conversation msg_or_conv to disk.

Default path is LOG_DIR/conversation__<first_msg_hash>__<time_received_str>.json, where LOG_DIR is set by user preferences or ENV variable (defaults to log/ in current working directory).

If you want to change the logging directory or the exact file name to log with, you can provide the following arguments to tracer_kwargs:

It can be composed with TracerSchema to also attach necessary metadata (see below).

Example

julia
wrap_schema = PT.SaverSchema(PT.TracerSchema(PT.OpenAISchema()))
conv = aigenerate(wrap_schema, :BlankSystemUser; system="You're a French-speaking assistant!",
    user="Say hi!", model="gpt-4", api_kwargs=(;temperature=0.1), return_all=true)

# conv is a vector of messages that will be saved to a JSON together with metadata about the template and api_kwargs

See also: meta, unwrap, TracerSchema, initialize_tracer

source


# PromptingTools.find_subsequence_positionsMethod.
julia
find_subsequence_positions(subseq, seq) -> Vector{Int}

Find all positions of a subsequence subseq within a larger sequence seq. Used to lookup positions of code blocks in markdown.

This function scans the sequence seq and identifies all starting positions where the subsequence subseq is found. Both subseq and seq should be vectors of integers, typically obtained using codeunits on strings.

Arguments

Returns

Examples

julia
find_subsequence_positions(codeunits("ab"), codeunits("cababcab")) # Returns [2, 5]

source


# PromptingTools.generate_structMethod.
julia
generate_struct(fields::Vector)

Generate a struct with the given name and fields. Fields can be specified simply as symbols (with default type String) or pairs of symbol and type. Field descriptions can be provided by adding a pair with the field name suffixed with "__description" (eg, :myfield__description => "My field description").

Returns: A tuple of (struct type, descriptions)

Examples

julia
Weather, descriptions = generate_struct(
    [:location,
     :temperature=>Float64,
     :temperature__description=>"Temperature in degrees Fahrenheit",
     :condition=>String,
     :condition__description=>"Current weather condition (e.g., sunny, rainy, cloudy)"
    ])

source


# PromptingTools.get_arg_namesMethod.

Get the argument names from a function, ignores keyword arguments!!

source


# PromptingTools.get_arg_namesMethod.

Get the argument names from a method, ignores keyword arguments!!

source


# PromptingTools.get_arg_typesMethod.

Get the argument types from a function, ignores keyword arguments!!

source


# PromptingTools.get_arg_typesMethod.

Get the argument types from a method, ignores keyword arguments!!

source


# PromptingTools.get_preferencesMethod.
julia
get_preferences(key::String)

Get preferences for PromptingTools. See ?PREFERENCES for more information.

See also: set_preferences!

Example

julia
PromptingTools.get_preferences("MODEL_CHAT")

source


# PromptingTools.ggi_generate_contentFunction.

Stub - to be extended in extension: GoogleGenAIPromptingToolsExt. ggi stands for GoogleGenAI

source


# PromptingTools.handle_error_messageMethod.
julia
handle_error_message(chunk::StreamChunk; throw_on_error::Bool = false, kwargs...)

Handles error messages from the streaming response.

source


# PromptingTools.has_julia_promptMethod.

Checks if a given string has a Julia prompt (julia>) at the beginning of a line.
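
For example (return values assumed from the description):

julia
has_julia_prompt("julia> 1 + 1") # assumed: true
has_julia_prompt("1 + 1")        # assumed: false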

source


# PromptingTools.initialize_tracerMethod.
julia
initialize_tracer(
    tracer_schema::AbstractTracerSchema; model = "", tracer_kwargs = NamedTuple(),
    prompt::ALLOWED_PROMPT_TYPE = "", kwargs...)

Initializes tracer/callback (if necessary). Can provide any keyword arguments in tracer_kwargs (eg, parent_id, thread_id, run_id). Is executed prior to the ai* calls.

By default it captures:

In the default implementation, we just collect the necessary data to build the tracer object in finalize_tracer.

See also: meta, unwrap, TracerSchema, SaverSchema, finalize_tracer

source


# PromptingTools.is_doneMethod.
julia
is_done(flavor, chunk)

Check if the streaming is done. Shared by all streaming flavors currently.

source


# PromptingTools.isextractedMethod.

Check if the object is an instance of AbstractExtractedData

source


# PromptingTools.last_messageMethod.

Helpful accessor for the last message in conversation. Returns the last message in the conversation.

source


# PromptingTools.last_outputMethod.

Helpful accessor for the last generated output (msg.content) in conversation. Returns the last output in the conversation (eg, the string/data in the last message).
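
A small usage sketch combining it with the last_message accessor above (the reply text is illustrative):

julia
conv = aigenerate("Say hi!"; return_all = true)
last_message(conv) # the final AIMessage in the conversation
last_output(conv)  # its content, eg, "Hi there! How can I help you today?"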

source


# PromptingTools.length_longest_common_subsequenceMethod.
julia
length_longest_common_subsequence(itr1::AbstractString, itr2::AbstractString)

Compute the length of the longest common subsequence between two string sequences (ie, the higher the number, the better the match).

Source: https://cn.julialang.org/LeetCode.jl/dev/democards/problems/problems/1143.longest-common-subsequence/

Arguments

Returns

The length of the longest common subsequence.

Examples

julia
text1 = "abc-abc----"
text2 = "___ab_c__abc"
length_longest_common_subsequence(text1, text2)
# Output: 6 (-> "abcabc")

It can be used to fuzzy match strings and find the similarity between them (Tip: normalize the match)

julia
commands = ["product recommendation", "emotions", "specific product advice", "checkout advice"]
query = "Which product can you recommend for me?"
let pos = argmax(length_longest_common_subsequence.(Ref(query), commands))
    dist = length_longest_common_subsequence(query, commands[pos])
    norm = dist / min(length(query), length(commands[pos]))
    @info "The closest command to the query: \"$(query)\" is: \"$(commands[pos])\" (distance: $(dist), normalized: $(norm))"
end

But it might be easier to use directly the convenience wrapper distance_longest_common_subsequence!

source


# PromptingTools.list_aliasesMethod.

Shows the Dictionary of model aliases in the registry. Add more with MODEL_ALIASES[alias] = model_name.

source


# PromptingTools.list_registryMethod.

Shows the list of models in the registry. Add more with register_model!.

source


# PromptingTools.load_api_keys!Method.

Loads API keys from environment variables and preferences.

source


# PromptingTools.load_conversationMethod.
julia
load_conversation(io_or_file::Union{IO, AbstractString})

Loads a conversation (messages) from io_or_file

source


# PromptingTools.load_templateMethod.
julia
load_template(io_or_file::Union{IO, AbstractString})

Loads messaging template from io_or_file and returns tuple of template messages and metadata.

source


# PromptingTools.load_templates!Function.
julia
load_templates!(dir_templates::Union{String, Nothing} = nothing;
    remember_path::Bool = true,
    remove_templates::Bool = isnothing(dir_templates),
    store::Dict{Symbol, <:Any} = TEMPLATE_STORE,
    metadata_store::Vector{<:AITemplateMetadata} = TEMPLATE_METADATA)

Loads templates from folder templates/ in the package root and stores them in TEMPLATE_STORE and TEMPLATE_METADATA.

Note: Automatically removes any existing templates and metadata from TEMPLATE_STORE and TEMPLATE_METADATA if remove_templates=true.

Arguments

Example

Load the default templates:

julia
PT.load_templates!() # no path needed

Load templates from a new custom path:

julia
PT.load_templates!("path/to/templates") # we will remember this path for future refresh

If you want to now refresh the default templates and the new path, just call load_templates!() without any arguments.

source


# PromptingTools.metaMethod.

Extracts the metadata dictionary from the tracer message or tracer-like object.

source


# PromptingTools.ollama_apiFunction.
julia
ollama_api(prompt_schema::Union{AbstractOllamaManagedSchema, AbstractOllamaSchema},
    prompt::Union{AbstractString, Nothing} = nothing;
    system::Union{Nothing, AbstractString} = nothing,
    messages::Vector{<:AbstractMessage} = AbstractMessage[],
    endpoint::String = "generate",
    model::String = "llama2", http_kwargs::NamedTuple = NamedTuple(),
    stream::Bool = false,
    url::String = "localhost", port::Int = 11434,
    kwargs...)

Simple wrapper for a call to Ollama API.
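
A minimal sketch, assuming a locally running Ollama server and a zero-argument OllamaManagedSchema constructor:

julia
schema = PT.OllamaManagedSchema() # assumed zero-argument constructor
msg = ollama_api(schema, "Say hi!"; model = "llama2") # assumes an Ollama server on localhost:11434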

Keyword Arguments

source


# PromptingTools.parse_toolMethod.
julia
parse_tool(datatype::Type, blob::AbstractString)

Parse the JSON blob into the specified datatype in try-catch mode.

If parsing fails, it tries to return the untyped JSON blob in a dictionary.
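
For illustration, a hypothetical return type and JSON blob (the shown result is assumed, not verified):

julia
struct WeatherReport # hypothetical datatype for illustration
    location::String
    temperature::Float64
end

parse_tool(WeatherReport, "{\"location\": \"Prague\", \"temperature\": 12.5}")
# Assumed result: WeatherReport("Prague", 12.5)

# On a malformed blob, the untyped JSON is returned in a dictionary instead of throwing.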

source


# PromptingTools.pprintFunction.

Utility for pretty printing PromptingTools types in REPL.

source


# PromptingTools.pprintMethod.
julia
pprint(io::IO, conversation::AbstractVector{<:AbstractMessage})

Pretty print a vector of AbstractMessage to the given IO stream.

source


# PromptingTools.pprintMethod.
julia
pprint(io::IO, msg::AbstractMessage; text_width::Int = displaysize(io)[2])

Pretty print a single AbstractMessage to the given IO stream.

text_width is the width of the text to be displayed. If not provided, it defaults to the width of the given IO stream; newline separators are added as needed.

source


# PromptingTools.previewFunction.

Utility for rendering the conversation (vector of messages) as markdown. REQUIRES the Markdown package to load the extension! See also pprint

source


# PromptingTools.print_contentMethod.
julia
print_content(out::Channel, text::AbstractString; kwargs...)

Print the content to the provided Channel out.

source


# PromptingTools.print_contentMethod.
julia
print_content(out::IO, text::AbstractString; kwargs...)

Print the content to the IO output stream out.

source


# PromptingTools.print_contentMethod.
julia
print_content(out::Nothing, text::Any)

Do nothing if the output stream is nothing.

source


# PromptingTools.push_conversation!Method.
julia
push_conversation!(conv_history, conversation::AbstractVector, max_history::Union{Int, Nothing})

Add a new conversation to the conversation history and resize the history if necessary.

This function appends a conversation to the conv_history, which is a vector of conversations. Each conversation is represented as a vector of AbstractMessage objects. After adding the new conversation, the history is resized according to the max_history parameter to ensure that the size of the history does not exceed the specified limit.

Arguments

Returns

The updated conversation history.

Example

julia
new_conversation = aigenerate("Hello World"; return_all = true)
push_conversation!(PT.CONV_HISTORY, new_conversation, 10)

This is done automatically by the ai"" macros.

source


# PromptingTools.recursive_splitterMethod.
julia
recursive_splitter(text::AbstractString, separators::Vector{String}; max_length::Int=35000) -> Vector{String}

Split a given string text into chunks recursively using a series of separators, with each chunk having a maximum length of max_length (if it's achievable given the separators provided). This function is useful for splitting large documents or texts into smaller segments that are more manageable for processing, particularly for models or systems with limited context windows.

It was previously known as split_by_length.

This is similar to Langchain's RecursiveCharacterTextSplitter. To achieve the same behavior, use separators=["\\n\\n", "\\n", " ", ""].

Arguments

Returns

Vector{String}: A vector of strings, where each string is a chunk of the original text that is smaller than or equal to max_length.

Usage Tips

How It Works

Examples

Splitting text using multiple separators:

julia
text = "Paragraph 1\n\nParagraph 2. Sentence 1. Sentence 2.\nParagraph 3"
separators = ["\n\n", ". ", "\n"] # split by paragraphs, sentences, and newlines (not by words)
chunks = recursive_splitter(text, separators, max_length=20)

Splitting text using multiple separators - with splitting on words:

julia
text = "Paragraph 1\n\nParagraph 2. Sentence 1. Sentence 2.\nParagraph 3"
separators = ["\n\n", ". ", "\n", " "] # split by paragraphs, sentences, newlines, and words
chunks = recursive_splitter(text, separators, max_length=10)

Using a single separator:

julia
text = "Hello,World," ^ 2900  # length 34900 characters
chunks = recursive_splitter(text, [","], max_length=10000)

To achieve the same behavior as Langchain's RecursiveCharacterTextSplitter, use separators=["\\n\\n", "\\n", " ", ""].

julia
text = "Paragraph 1\n\nParagraph 2. Sentence 1. Sentence 2.\nParagraph 3"
separators = ["\n\n", "\n", " ", ""]
chunks = recursive_splitter(text, separators, max_length=10)

source


# PromptingTools.recursive_splitterMethod.
julia
recursive_splitter(text::String; separator::String=" ", max_length::Int=35000) -> Vector{String}

Split a given string text into chunks of a specified maximum length max_length. This is particularly useful for splitting larger documents or texts into smaller segments, suitable for models or systems with smaller context windows.

There is a method for dispatching on multiple separators, recursive_splitter(text::String, separators::Vector{String}; max_length::Int=35000) -> Vector{String} that mimics the logic of Langchain's RecursiveCharacterTextSplitter.

Arguments

Returns

Vector{String}: A vector of strings, each representing a chunk of the original text that is smaller than or equal to max_length.

Notes

Examples

Splitting text with the default separator (" "):

julia
text = "Hello world. How are you?"
chunks = recursive_splitter(text; max_length=13)
length(chunks) # Output: 2

Using a custom separator and custom max_length

julia
text = "Hello,World," ^ 2900 # length 34900 chars
chunks = recursive_splitter(text; separator=",", max_length=10000) # for 4K context window
length(chunks) # Output: 4

source


# PromptingTools.register_model!Function.
julia
register_model!(registry = MODEL_REGISTRY;
    name::String,
    schema::Union{AbstractPromptSchema, Nothing} = nothing,
    cost_of_token_prompt::Float64 = 0.0,
    cost_of_token_generation::Float64 = 0.0,
    description::String = "")

Register a new AI model with name and its associated schema.

Registering a model helps with calculating the costs and automatically selecting the right prompt schema.
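
For example (the model name and token costs below are purely illustrative):

julia
register_model!(;
    name = "my-finetuned-gpt", # hypothetical model name
    schema = PT.OpenAISchema(),
    cost_of_token_prompt = 1e-7,  # illustrative costs
    cost_of_token_generation = 2e-7,
    description = "Example registration with illustrative costs.")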

Arguments

source


# PromptingTools.remove_julia_promptMethod.
julia
remove_julia_prompt(s::T) where {T<:AbstractString}

If it detects a julia prompt, it removes it and all lines that do not have it (except for those that belong to the code block).
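
For example (the exact return value is assumed from the description):

julia
s = """
julia> a = 1
1

julia> b = a + 1
2
"""
remove_julia_prompt(s)
# Assumed to keep only the code from the prompted lines, ie, "a = 1\nb = a + 1"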

source


# PromptingTools.remove_templates!Method.
julia
    remove_templates!()

Removes all templates from TEMPLATE_STORE and TEMPLATE_METADATA.

source


# PromptingTools.remove_unsafe_linesMethod.

Iterates over the lines of a string and removes those that contain a package operation or a missing import.

source


# PromptingTools.renderMethod.

Renders provided messaging template (template) under the default schema (PROMPT_SCHEMA).

source


', 109)), createBaseVNode("div", _hoisted_4, [ _cache[20] || (_cache[20] = createStaticVNode('# PromptingTools.renderMethod.
julia
render(schema::AbstractAnthropicSchema,
    messages::Vector{<:AbstractMessage};
    aiprefill::Union{Nothing, AbstractString} = nothing,
    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],
    no_system_message::Bool = false,
    cache::Union{Nothing, Symbol} = nothing,
    kwargs...)
', 7)), createBaseVNode("p", null, [ @@ -66,9 +66,9 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { createBaseVNode("code", null, toDisplayString(_ctx.key) + "=>value", 1), _cache[19] || (_cache[19] = createTextVNode(" in the template.")) ]), - _cache[21] || (_cache[21] = createStaticVNode('

Keyword Arguments

source

', 3)) + _cache[21] || (_cache[21] = createStaticVNode('

Keyword Arguments

source

', 3)) ]), - _cache[51] || (_cache[51] = createStaticVNode('
# PromptingTools.renderMethod.
julia
render(schema::AbstractAnthropicSchema,\n    tools::Vector{<:AbstractTool};\n    kwargs...)

Renders the tool signatures into the Anthropic format.

source


', 3)), + _cache[51] || (_cache[51] = createStaticVNode('
# PromptingTools.renderMethod.
julia
render(schema::AbstractAnthropicSchema,
    tools::Vector{<:AbstractTool};
    kwargs...)

Renders the tool signatures into the Anthropic format.

source


', 3)), createBaseVNode("div", _hoisted_5, [ _cache[24] || (_cache[24] = createStaticVNode('# PromptingTools.renderMethod.
julia
render(schema::AbstractGoogleSchema,
    messages::Vector{<:AbstractMessage};
    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],
    no_system_message::Bool = false,
    kwargs...)
', 7)), createBaseVNode("p", null, [ @@ -76,7 +76,7 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { createBaseVNode("code", null, toDisplayString(_ctx.key) + "=>value", 1), _cache[23] || (_cache[23] = createTextVNode(" in the template.")) ]), - _cache[25] || (_cache[25] = createStaticVNode('

Keyword Arguments

source

', 3)) + _cache[25] || (_cache[25] = createStaticVNode('

Keyword Arguments

source

', 3)) ]), _cache[52] || (_cache[52] = createBaseVNode("br", null, null, -1)), createBaseVNode("div", _hoisted_6, [ @@ -104,7 +104,7 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { ], -1)), _cache[32] || (_cache[32] = createBaseVNode("p", null, [ createBaseVNode("a", { - href: "https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/llm_ollama_managed.jl#L9-L21", + href: "https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/llm_ollama_managed.jl#L9-L21", target: "_blank", rel: "noreferrer" }, "source") @@ -118,7 +118,7 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { createBaseVNode("code", null, toDisplayString(_ctx.key) + "=>value", 1), _cache[34] || (_cache[34] = createTextVNode(" in the template.")) ]), - _cache[36] || (_cache[36] = createStaticVNode('

Keyword Arguments

source

', 3)) + _cache[36] || (_cache[36] = createStaticVNode('

Keyword Arguments

source

', 3)) ]), _cache[54] || (_cache[54] = createBaseVNode("br", null, null, -1)), createBaseVNode("div", _hoisted_8, [ @@ -128,9 +128,9 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { createBaseVNode("code", null, toDisplayString(_ctx.key) + "=>value", 1), _cache[38] || (_cache[38] = createTextVNode(" in the template.")) ]), - _cache[40] || (_cache[40] = createStaticVNode('

Keyword Arguments

source

', 3)) + _cache[40] || (_cache[40] = createStaticVNode('

Keyword Arguments

source

', 3)) ]), - _cache[55] || (_cache[55] = createStaticVNode('
# PromptingTools.renderMethod.
julia
render(schema::AbstractOpenAISchema,\n    tools::Vector{<:AbstractTool};\n    json_mode::Union{Nothing, Bool} = nothing,\n    kwargs...)

Renders the tool signatures into the OpenAI format.

source


# PromptingTools.renderMethod.
julia
render(tracer_schema::AbstractTracerSchema,\n    conv::AbstractVector{<:AbstractMessage}; kwargs...)

Passthrough. No changes.

source


', 5)), + _cache[55] || (_cache[55] = createStaticVNode('
# PromptingTools.renderMethod.
julia
render(schema::AbstractOpenAISchema,
    tools::Vector{<:AbstractTool};
    json_mode::Union{Nothing, Bool} = nothing,
    kwargs...)

Renders the tool signatures into the OpenAI format.

source


# PromptingTools.renderMethod.
julia
render(tracer_schema::AbstractTracerSchema,
    conv::AbstractVector{<:AbstractMessage}; kwargs...)

Passthrough. No changes.

source


', 5)), createBaseVNode("div", _hoisted_9, [ _cache[45] || (_cache[45] = createStaticVNode('# PromptingTools.renderMethod.
julia
render(schema::NoSchema,
    messages::Vector{<:AbstractMessage};
    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],
    no_system_message::Bool = false,
    replacement_kwargs...)

Renders a conversation history from a vector of messages with all replacement variables specified in replacement_kwargs.

It is the first pass of the prompt rendering system, and is used by all other schemas.

Keyword Arguments

Notes

', 12)), createBaseVNode("ul", null, [ @@ -150,13 +150,13 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { ]), _cache[46] || (_cache[46] = createBaseVNode("p", null, [ createBaseVNode("a", { - href: "https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/llm_shared.jl#L11-L31", + href: "https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/llm_shared.jl#L11-L31", target: "_blank", rel: "noreferrer" }, "source") ], -1)) ]), - _cache[56] || (_cache[56] = createStaticVNode('
# PromptingTools.replace_wordsMethod.
julia
replace_words(text::AbstractString, words::Vector{<:AbstractString}; replacement::AbstractString="ABC")

Replace all occurrences of words in words with replacement in text. Useful to quickly remove specific names or entities from a text.

Arguments

Example

julia
text = "Disney is a great company"\nreplace_words(text, ["Disney", "Snow White", "Mickey Mouse"])\n# Output: "ABC is a great company"

source


# PromptingTools.resize_conversation!Method.
julia
resize_conversation!(conv_history, max_history::Union{Int, Nothing})

Resize the conversation history to a specified maximum length.

This function trims the conv_history to ensure that its size does not exceed max_history. It removes the oldest conversations first if the length of conv_history is greater than max_history.

Arguments

Returns

The resized conversation history.

Example

julia
resize_conversation!(PT.CONV_HISTORY, PT.MAX_HISTORY_LENGTH)

After the function call, conv_history will contain only the 10 most recent conversations.

This is done automatically by the ai"" macros.

source


# PromptingTools.response_to_messageMethod.
julia
response_to_message(schema::AbstractOpenAISchema,\n    MSG::Type{AIMessage},\n    choice,\n    resp;\n    model_id::AbstractString = "",\n    time::Float64 = 0.0,\n    run_id::Int = Int(rand(Int32)),\n    sample_id::Union{Nothing, Integer} = nothing,\n    name_assistant::Union{Nothing, String} = nothing)

Utility to facilitate unwrapping of HTTP response to a message type MSG provided for OpenAI-like responses

Note: Extracts finish_reason and log_prob if available in the response.

Arguments

source


# PromptingTools.response_to_messageMethod.

Utility to facilitate unwrapping of HTTP response to a message type MSG provided. Designed to handle multi-sample completions.

source


# PromptingTools.save_conversationMethod.
julia
save_conversation(io_or_file::Union{IO, AbstractString},\n    messages::AbstractVector{<:AbstractMessage})

Saves provided conversation (messages) to io_or_file. If you need to add some metadata, see save_template.

source


# PromptingTools.save_conversationsMethod.
julia
save_conversations(schema::AbstractPromptSchema, filename::AbstractString,\n    conversations::Vector{<:AbstractVector{<:PT.AbstractMessage}})

Saves provided conversations (vector of vectors of messages) to filename rendered in the particular schema.

Commonly used for finetuning models with schema = ShareGPTSchema()

The format is JSON Lines, where each line is a JSON object representing one provided conversation.

See also: save_conversation

Examples

You must always provide a VECTOR of conversations

julia
messages = AbstractMessage[SystemMessage("System message 1"),\n    UserMessage("User message"),\n    AIMessage("AI message")]\nconversation = [messages] # vector of vectors\n\ndir = tempdir()\nfn = joinpath(dir, "conversations.jsonl")\nsave_conversations(fn, conversation)\n\n# Content of the file (one line for each conversation)\n# {"conversations":[{"value":"System message 1","from":"system"},{"value":"User message","from":"human"},{"value":"AI message","from":"gpt"}]}

source


# PromptingTools.save_templateMethod.
julia
save_template(io_or_file::Union{IO, AbstractString},\n    messages::AbstractVector{<:AbstractChatMessage};\n    content::AbstractString = "Template Metadata",\n    description::AbstractString = "",\n    version::AbstractString = "1",\n    source::AbstractString = "")

Saves provided messaging template (messages) to io_or_file. Automatically adds metadata based on provided keyword arguments.

source


# PromptingTools.set_preferences!Method.
julia
set_preferences!(pairs::Pair{String, <:Any}...)

Set preferences for PromptingTools. See ?PREFERENCES for more information.

See also: get_preferences

Example

Change your API key and default model:

julia
PromptingTools.set_preferences!("OPENAI_API_KEY" => "key1", "MODEL_CHAT" => "chat1")

source


# PromptingTools.set_properties_strict!Method.
julia
set_properties_strict!(properties::AbstractDict)

Sets strict mode for the properties of a JSON schema.

Changes:

Reference: https://platform.openai.com/docs/guides/structured-outputs/supported-schemas

source


# PromptingTools.streamed_request!Method.
julia
streamed_request!(cb::AbstractStreamCallback, url, headers, input; kwargs...)

End-to-end wrapper for POST streaming requests. In-place modification of the callback object (cb.chunks) with the results of the request being returned. We build the body of the response object in the end and write it into the resp.body.

Returns the response object.

Arguments

source


', 21)), + _cache[56] || (_cache[56] = createStaticVNode('
# PromptingTools.replace_wordsMethod.
julia
replace_words(text::AbstractString, words::Vector{<:AbstractString}; replacement::AbstractString="ABC")

Replace all occurrences of words in words with replacement in text. Useful to quickly remove specific names or entities from a text.

Arguments

Example

julia
text = "Disney is a great company"
replace_words(text, ["Disney", "Snow White", "Mickey Mouse"])
# Output: "ABC is a great company"

source


# PromptingTools.resize_conversation!Method.
julia
resize_conversation!(conv_history, max_history::Union{Int, Nothing})

Resize the conversation history to a specified maximum length.

This function trims the conv_history to ensure that its size does not exceed max_history. It removes the oldest conversations first if the length of conv_history is greater than max_history.

Arguments

Returns

The resized conversation history.

Example

julia
resize_conversation!(PT.CONV_HISTORY, PT.MAX_HISTORY_LENGTH)

After the function call, conv_history will contain only the 10 most recent conversations.

This is done automatically by the ai"" macros.

source


# PromptingTools.response_to_messageMethod.
julia
response_to_message(schema::AbstractOpenAISchema,
    MSG::Type{AIMessage},
    choice,
    resp;
    model_id::AbstractString = "",
    time::Float64 = 0.0,
    run_id::Int = Int(rand(Int32)),
    sample_id::Union{Nothing, Integer} = nothing,
    name_assistant::Union{Nothing, String} = nothing)

Utility to facilitate unwrapping of HTTP response to a message type MSG provided for OpenAI-like responses

Note: Extracts finish_reason and log_prob if available in the response.

Arguments

source


# PromptingTools.response_to_messageMethod.

Utility to facilitate unwrapping of HTTP response to a message type MSG provided. Designed to handle multi-sample completions.

source


# PromptingTools.save_conversationMethod.
julia
save_conversation(io_or_file::Union{IO, AbstractString},
    messages::AbstractVector{<:AbstractMessage})

Saves provided conversation (messages) to io_or_file. If you need to add some metadata, see save_template.
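
A small round-trip sketch together with load_conversation (the file name is illustrative):

julia
conv = aigenerate("Hello!"; return_all = true)
save_conversation("conversation.json", conv)

# ...later, restore it:
conv2 = load_conversation("conversation.json")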

source


# PromptingTools.save_conversationsMethod.
julia
save_conversations(schema::AbstractPromptSchema, filename::AbstractString,
    conversations::Vector{<:AbstractVector{<:PT.AbstractMessage}})

Saves provided conversations (vector of vectors of messages) to filename rendered in the particular schema.

Commonly used for finetuning models with schema = ShareGPTSchema()

The format is JSON Lines, where each line is a JSON object representing one provided conversation.

See also: save_conversation

Examples

You must always provide a VECTOR of conversations

julia
messages = AbstractMessage[SystemMessage("System message 1"),
    UserMessage("User message"),
    AIMessage("AI message")]
conversation = [messages] # vector of vectors

dir = tempdir()
fn = joinpath(dir, "conversations.jsonl")
save_conversations(fn, conversation)

# Content of the file (one line for each conversation)
# {"conversations":[{"value":"System message 1","from":"system"},{"value":"User message","from":"human"},{"value":"AI message","from":"gpt"}]}

source


# PromptingTools.save_templateMethod.
julia
save_template(io_or_file::Union{IO, AbstractString},
    messages::AbstractVector{<:AbstractChatMessage};
    content::AbstractString = "Template Metadata",
    description::AbstractString = "",
    version::AbstractString = "1",
    source::AbstractString = "")

Saves provided messaging template (messages) to io_or_file. Automatically adds metadata based on provided keyword arguments.

source


# PromptingTools.set_preferences!Method.
julia
set_preferences!(pairs::Pair{String, <:Any}...)

Set preferences for PromptingTools. See ?PREFERENCES for more information.

See also: get_preferences

Example

Change your API key and default model:

julia
PromptingTools.set_preferences!("OPENAI_API_KEY" => "key1", "MODEL_CHAT" => "chat1")

source


# PromptingTools.set_properties_strict!Method.
julia
set_properties_strict!(properties::AbstractDict)

Sets strict mode for the properties of a JSON schema.

Changes:

Reference: https://platform.openai.com/docs/guides/structured-outputs/supported-schemas

source


# PromptingTools.streamed_request!Method.
julia
streamed_request!(cb::AbstractStreamCallback, url, headers, input; kwargs...)

End-to-end wrapper for POST streaming requests. In-place modification of the callback object (cb.chunks) with the results of the request being returned. We build the body of the response object in the end and write it into the resp.body.

Returns the response object.

Arguments

source


', 21)), _cache[57] || (_cache[57] = createBaseVNode("div", { style: { "border-width": "1px", "border-style": "solid", "border-color": "black", "padding": "1em", "border-radius": "25px" } }, [ createBaseVNode("a", { id: "PromptingTools.tool_call_signature-Tuple{Union{Method, Type}}", @@ -503,7 +503,7 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { ]), createTextVNode("\n"), createBaseVNode("span", { class: "line" }, [ - createBaseVNode("span", null, "[source](https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/extraction.jl#L341-L424)") + createBaseVNode("span", null, "[source](https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/extraction.jl#L341-L424)") ]), createTextVNode("\n"), createBaseVNode("span", { class: "line" }, [ @@ -713,13 +713,13 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { ]), createBaseVNode("p", null, [ createBaseVNode("a", { - href: "https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/extraction.jl#L505-L538", + href: "https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/extraction.jl#L505-L538", target: "_blank", rel: "noreferrer" }, "source") ]) ], -1)), - _cache[58] || (_cache[58] = createStaticVNode('
# PromptingTools.unique_permutationMethod.
julia
unique_permutation(inputs::AbstractVector)

Returns indices of unique items in a vector inputs. Access the unique values as inputs[unique_permutation(inputs)].

source


# PromptingTools.unwrapMethod.

Unwraps the tracer message or tracer-like object, returning the original object.

source


# PromptingTools.update_field_descriptions!Method.
julia
update_field_descriptions!(\n    parameters::Dict{String, <:Any}, descriptions::Dict{Symbol, <:AbstractString};\n    max_description_length::Int = 200)

Update the given JSON schema with descriptions from the descriptions dictionary. This function modifies the schema in-place, adding a "description" field to each property that has a corresponding entry in the descriptions dictionary.

Note: It modifies the schema in place. Only the top-level "properties" are updated!

Returns: The modified schema dictionary.

Arguments

Examples

julia
    parameters = Dict{String, Any}(\n        "properties" => Dict{String, Any}(\n            "location" => Dict{String, Any}("type" => "string"),\n            "condition" => Dict{String, Any}("type" => "string"),\n            "temperature" => Dict{String, Any}("type" => "number")\n        ),\n        "required" => ["location", "temperature", "condition"],\n        "type" => "object"\n    )\n    descriptions = Dict{Symbol, String}(\n        :temperature => "Temperature in degrees Fahrenheit",\n        :condition => "Current weather condition (e.g., sunny, rainy, cloudy)"\n    )\n    update_field_descriptions!(parameters, descriptions)

source


# PromptingTools.wrap_stringFunction.
julia
wrap_string(str::String,\n    text_width::Int = 20;\n    newline::Union{AbstractString, AbstractChar} = '

')

Breaks a string into lines of a given text_width. Optionally, you can specify the newline character or string to use.

Example:

julia
wrap_string("Certainly, here's a function in Julia that will wrap a string according to the specifications:", 10) |> print

source


# PromptingTools.@aai_strMacro.
julia
aai"user_prompt"[model_alias] -> AIMessage

Asynchronous version of @ai_str macro, which will log the result once it's ready.

See also aai!"" if you want an asynchronous reply to the provided message / continue the conversation.

Example

Send asynchronous request to GPT-4, so we don't have to wait for the response: Very practical with slow models, so you can keep working in the meantime.

julia
\n**...with some delay...**\n\n**[ Info: Tokens: 29 @ Cost: 0.0011\n in 2.7 seconds**\n\n**[ Info: AIMessage> Hello! How can I assist you today?**\n\n\n[source](https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/macros.jl#L99-L116)\n\n</div>\n<br>\n<div style='border-width:1px; border-style:solid; border-color:black; padding: 1em; border-radius: 25px;'>\n<a id='PromptingTools.@ai!_str-Tuple{Any, Vararg{Any}}' href='#PromptingTools.@ai!_str-Tuple{Any, Vararg{Any}}'>#</a>&nbsp;<b><u>PromptingTools.@ai!_str</u></b> &mdash; <i>Macro</i>.\n\n\n\n\n```julia\nai!"user_prompt"[model_alias] -> AIMessage

The ai!"" string macro is used to continue a previous conversation with the AI model.

It appends the new user prompt to the last conversation in the tracked history (in PromptingTools.CONV_HISTORY) and generates a response based on the entire conversation context. If you want to see the previous conversation, you can access it via PromptingTools.CONV_HISTORY, which keeps at most last PromptingTools.MAX_HISTORY_LENGTH conversations.

Arguments

Returns

AIMessage corresponding to the new user prompt, considering the entire conversation history.

Example

To continue a conversation:

julia
# start conversation as normal\nai"Say hi." \n\n# ... wait for reply and then react to it:\n\n# continue the conversation (notice that you can change the model, eg, to more powerful one for better answer)\nai!"What do you think about that?"gpt4t\n# AIMessage("Considering our previous discussion, I think that...")

Usage Notes

Important

Ensure that the conversation history is not too long to maintain relevancy and coherence in the AI's responses. The history length is managed by MAX_HISTORY_LENGTH.

source


# PromptingTools.@ai_strMacro.
julia
ai"user_prompt"[model_alias] -> AIMessage

The ai"" string macro generates an AI response to a given prompt by using aigenerate under the hood.

See also ai!"" if you want to reply to the provided message / continue the conversation.

Arguments

Returns

AIMessage corresponding to the input prompt.

Example

julia
result = ai"Hello, how are you?"\n# AIMessage("Hello! I'm an AI assistant, so I don't have feelings, but I'm here to help you. How can I assist you today?")

If you want to interpolate some variables or additional context, simply use string interpolation:

julia
a=1\nresult = ai"What is `$a+$a`?"\n# AIMessage("The sum of `1+1` is `2`.")

If you want to use a different model, eg, GPT-4, you can provide its alias as a flag:

julia
result = ai"What is `1.23 * 100 + 1`?"gpt4t\n# AIMessage("The answer is 124.")

source


# PromptingTools.@timeoutMacro.
julia
@timeout(seconds, expr_to_run, expr_when_fails)

Simple macro to run an expression with a timeout of seconds. If the expr_to_run fails to finish in seconds seconds, expr_when_fails is returned.

Example

julia
x = @timeout 1 begin\n    sleep(1.1)\n    println("done")\n    1\nend "failed"

source


', 15)) + _cache[58] || (_cache[58] = createStaticVNode('
# PromptingTools.unique_permutationMethod.
julia
unique_permutation(inputs::AbstractVector)

Returns indices of unique items in a vector inputs. Access the unique values as inputs[unique_permutation(inputs)].
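
For example (the output is assumed from the description; indices point at the first occurrence of each value):

julia
inputs = ["a", "b", "a", "c", "b"]
idxs = unique_permutation(inputs)
# Assumed output: [1, 2, 4]
inputs[idxs] # ["a", "b", "c"]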

source


# PromptingTools.unwrapMethod.

Unwraps the tracer message or tracer-like object, returning the original object.

source


# PromptingTools.update_field_descriptions!Method.
julia
update_field_descriptions!(
    parameters::Dict{String, <:Any}, descriptions::Dict{Symbol, <:AbstractString};
    max_description_length::Int = 200)

Update the given JSON schema with descriptions from the descriptions dictionary. This function modifies the schema in-place, adding a "description" field to each property that has a corresponding entry in the descriptions dictionary.

Note: It modifies the schema in place. Only the top-level "properties" are updated!

Returns: The modified schema dictionary.

Arguments

Examples

julia
parameters = Dict{String, Any}(
    "properties" => Dict{String, Any}(
        "location" => Dict{String, Any}("type" => "string"),
        "condition" => Dict{String, Any}("type" => "string"),
        "temperature" => Dict{String, Any}("type" => "number")
    ),
    "required" => ["location", "temperature", "condition"],
    "type" => "object"
)
descriptions = Dict{Symbol, String}(
    :temperature => "Temperature in degrees Fahrenheit",
    :condition => "Current weather condition (e.g., sunny, rainy, cloudy)"
)
update_field_descriptions!(parameters, descriptions)

source


# PromptingTools.wrap_stringFunction.
julia
wrap_string(str::String,
    text_width::Int = 20;
    newline::Union{AbstractString, AbstractChar} = '\n')

Breaks a string into lines of a given text_width. Optionally, you can specify the newline character or string to use.

Example:

julia
wrap_string("Certainly, here's a function in Julia that will wrap a string according to the specifications:", 10) |> print

source


# PromptingTools.@aai_strMacro.
julia
aai"user_prompt"[model_alias] -> AIMessage

Asynchronous version of @ai_str macro, which will log the result once it's ready.

See also aai!"" if you want an asynchronous reply to the provided message / continue the conversation.

Example

Send an asynchronous request to GPT-4, so we don't have to wait for the response. Very practical with slow models, as you can keep working in the meantime.

julia
...with some delay...

[ Info: Tokens: 29 @ Cost: 0.0011 in 2.7 seconds

[ Info: AIMessage> Hello! How can I assist you today?

source


# PromptingTools.@ai!_strMacro.
julia
ai!"user_prompt"[model_alias] -> AIMessage

The ai!"" string macro is used to continue a previous conversation with the AI model.

It appends the new user prompt to the last conversation in the tracked history (in PromptingTools.CONV_HISTORY) and generates a response based on the entire conversation context. If you want to see the previous conversation, you can access it via PromptingTools.CONV_HISTORY, which keeps at most last PromptingTools.MAX_HISTORY_LENGTH conversations.

Arguments

Returns

AIMessage corresponding to the new user prompt, considering the entire conversation history.

Example

To continue a conversation:

julia
# start conversation as normal
ai"Say hi."

# ... wait for reply and then react to it:

# continue the conversation (notice that you can change the model, eg, to a more powerful one for a better answer)
ai!"What do you think about that?"gpt4t
# AIMessage("Considering our previous discussion, I think that...")

Usage Notes

Important

Ensure that the conversation history is not too long to maintain relevancy and coherence in the AI's responses. The history length is managed by MAX_HISTORY_LENGTH.

source


# PromptingTools.@ai_strMacro.
julia
ai"user_prompt"[model_alias] -> AIMessage

The ai"" string macro generates an AI response to a given prompt by using aigenerate under the hood.

See also ai!"" if you want to reply to the provided message / continue the conversation.

Arguments

Returns

AIMessage corresponding to the input prompt.

Example

julia
result = ai"Hello, how are you?"\n# AIMessage("Hello! I'm an AI assistant, so I don't have feelings, but I'm here to help you. How can I assist you today?")

If you want to interpolate some variables or additional context, simply use string interpolation:

julia
a = 1
result = ai"What is `$a+$a`?"
# AIMessage("The sum of `1+1` is `2`.")

If you want to use a different model, eg, GPT-4, you can provide its alias as a flag:

julia
result = ai"What is `1.23 * 100 + 1`?"gpt4t\n# AIMessage("The answer is 124.")

source


# PromptingTools.@timeoutMacro.
julia
@timeout(seconds, expr_to_run, expr_when_fails)

Simple macro to run an expression with a timeout of seconds. If the expr_to_run fails to finish in seconds seconds, expr_when_fails is returned.

Example

julia
x = @timeout 1 begin
    sleep(1.1)
    println("done")
    1
end "failed"

source


', 15)) ]); } const reference = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render]]); diff --git a/previews/PR218/assets/reference.md.DMO0NW9D.lean.js b/previews/PR218/assets/reference.md.Bl4MWKuL.lean.js similarity index 96% rename from previews/PR218/assets/reference.md.DMO0NW9D.lean.js rename to previews/PR218/assets/reference.md.Bl4MWKuL.lean.js index 44ef1f481..061682a66 100644 --- a/previews/PR218/assets/reference.md.DMO0NW9D.lean.js +++ b/previews/PR218/assets/reference.md.Bl4MWKuL.lean.js @@ -12,7 +12,7 @@ const _hoisted_8 = { style: { "border-width": "1px", "border-style": "solid", "b const _hoisted_9 = { style: { "border-width": "1px", "border-style": "solid", "border-color": "black", "padding": "1em", "border-radius": "25px" } }; function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { return openBlock(), createElementBlock("div", null, [ - _cache[47] || (_cache[47] = createStaticVNode('

Reference

# PromptingTools.ALLOWED_PREFERENCESConstant.

Keys that are allowed to be set via set_preferences!

source


# PromptingTools.ALTERNATIVE_GENERATION_COSTSConstant.
julia
ALTERNATIVE_GENERATION_COSTS

Tracker of alternative costing models, eg, for image generation (dall-e-3), the cost is driven by quality/size.

source


# PromptingTools.ANTHROPIC_TOOL_PROMPTConstant.

Simple template to add to the System Message when doing data extraction with Anthropic models.

It has 2 placeholders: tool_name, tool_description and tool_parameters that are filled with the tool's name, description and parameters. Source: https://docs.anthropic.com/claude/docs/functions-external-tools

source


# PromptingTools.CONV_HISTORYConstant.
julia
CONV_HISTORY

Tracks the most recent conversations through the ai_str macros.

Preference available: MAX_HISTORY_LENGTH, which sets how many last messages should be remembered.

See also: push_conversation!, resize_conversation!

source


# PromptingTools.MODEL_ALIASESConstant.
julia
MODEL_ALIASES

A dictionary of model aliases. Aliases are used to refer to models by their aliases instead of their full names to make it more convenient to use them.

Accessing the aliases

PromptingTools.MODEL_ALIASES["gpt3"]

Register a new model alias

julia
PromptingTools.MODEL_ALIASES["gpt3"] = "gpt-3.5-turbo"

source


# PromptingTools.MODEL_REGISTRYConstant.
julia
MODEL_REGISTRY

A store of available model names and their specs (ie, name, costs per token, etc.)

Accessing the registry

You can use both the alias name or the full name to access the model spec:

PromptingTools.MODEL_REGISTRY["gpt-3.5-turbo"]

Registering a new model

julia
register_model!(\n    name = "gpt-3.5-turbo",\n    schema = :OpenAISchema,\n    cost_of_token_prompt = 0.0015,\n    cost_of_token_generation = 0.002,\n    description = "GPT-3.5 Turbo is a 175B parameter model and a common default on the OpenAI API.")

Registering a model alias

julia
PromptingTools.MODEL_ALIASES["gpt3"] = "gpt-3.5-turbo"

source


# PromptingTools.OPENAI_TOKEN_IDS_GPT35_GPT4Constant.

Token IDs for GPT3.5 and GPT4 from https://platform.openai.com/tokenizer

source


# PromptingTools.PREFERENCESConstant.
julia
PREFERENCES

You can set preferences for PromptingTools by setting environment variables or by using the set_preferences!. It will create a LocalPreferences.toml file in your current directory and will reload your prefences from there.

Check your preferences by calling get_preferences(key::String).

Available Preferences (for set_preferences!)

At the moment it is not possible to persist changes to MODEL_REGISTRY across sessions. Define your register_model!() calls in your startup.jl file to make them available across sessions or put them at the top of your script.

Available ENV Variables

Preferences.jl takes priority over ENV variables, so if you set a preference, it will take precedence over the ENV variable.

WARNING: NEVER EVER sync your LocalPreferences.toml file! It contains your API key and other sensitive information!!!

source


# PromptingTools.RESERVED_KWARGSConstant.

The following keywords are reserved for internal use in the ai* functions and cannot be used as placeholders in the Messages

source


# PromptingTools.AICodeType.
julia
AICode(code::AbstractString; auto_eval::Bool=true, safe_eval::Bool=false, \nskip_unsafe::Bool=false, capture_stdout::Bool=true, verbose::Bool=false,\nprefix::AbstractString="", suffix::AbstractString="", remove_tests::Bool=false, execution_timeout::Int = 60)\n\nAICode(msg::AIMessage; auto_eval::Bool=true, safe_eval::Bool=false, \nskip_unsafe::Bool=false, skip_invalid::Bool=false, capture_stdout::Bool=true,\nverbose::Bool=false, prefix::AbstractString="", suffix::AbstractString="", remove_tests::Bool=false, execution_timeout::Int = 60)

A mutable structure representing a code block (received from the AI model) with automatic parsing, execution, and output/error capturing capabilities.

Upon instantiation with a string, the AICode object automatically runs a code parser and executor (via PromptingTools.eval!()), capturing any standard output (stdout) or errors. This structure is useful for programmatically handling and evaluating Julia code snippets.

See also: PromptingTools.extract_code_blocks, PromptingTools.eval!

Workflow

Properties

Keyword Arguments

Methods

Examples

julia
code = AICode("println("Hello, World!")") # Auto-parses and evaluates the code, capturing output and errors.\nisvalid(code) # Output: true\ncode.stdout # Output: "Hello, World!\n"

We try to evaluate "safely" by default (eg, inside a custom module, to avoid changing user variables). You can avoid that with save_eval=false:

julia
code = AICode("new_variable = 1"; safe_eval=false)\nisvalid(code) # Output: true\nnew_variable # Output: 1

You can also call AICode directly on an AIMessage, which will extract the Julia code blocks, concatenate them and evaluate them:

julia
msg = aigenerate("In Julia, how do you create a vector of 10 random numbers?")\ncode = AICode(msg)\n# Output: AICode(Success: True, Parsed: True, Evaluated: True, Error Caught: N/A, StdOut: True, Code: 2 Lines)\n\n# show the code\ncode.code |> println\n# Output: \n# numbers = rand(10)\n# numbers = rand(1:100, 10)\n\n# or copy it to the clipboard\ncode.code |> clipboard\n\n# or execute it in the current module (=Main)\neval(code.expression)

source


# PromptingTools.AIMessageType.
julia
AIMessage

A message type for AI-generated text-based responses. Returned by aigenerate, aiclassify, and aiscan functions.

Fields

source


# PromptingTools.AITemplateType.
julia
AITemplate

AITemplate is a template for a conversation prompt. This type is merely a container for the template name, which is resolved into a set of messages (=prompt) by render.

Naming Convention

Examples

Save time by re-using pre-made templates, just fill in the placeholders with the keyword arguments:

julia
msg = aigenerate(:JuliaExpertAsk; ask = "How do I add packages?")

The above is equivalent to a more verbose version that explicitly uses the dispatch on AITemplate:

julia
msg = aigenerate(AITemplate(:JuliaExpertAsk); ask = "How do I add packages?")

Find available templates with aitemplates:

julia
tmps = aitemplates("JuliaExpertAsk")\n# Will surface one specific template\n# 1-element Vector{AITemplateMetadata}:\n# PromptingTools.AITemplateMetadata\n#   name: Symbol JuliaExpertAsk\n#   description: String "For asking questions about Julia language. Placeholders: `ask`"\n#   version: String "1"\n#   wordcount: Int64 237\n#   variables: Array{Symbol}((1,))\n#   system_preview: String "You are a world-class Julia language programmer with the knowledge of the latest syntax. Your commun"\n#   user_preview: String "# Question\n\n{{ask}}"\n#   source: String ""

The above gives you a good idea of what the template is about, what placeholders are available, and how much it would cost to use it (=wordcount).

Search for all Julia-related templates:

julia
tmps = aitemplates("Julia")\n# 2-element Vector{AITemplateMetadata}... -> more to come later!

If you are on VSCode, you can leverage nice tabular display with vscodedisplay:

julia
using DataFrames\ntmps = aitemplates("Julia") |> DataFrame |> vscodedisplay

I have my selected template, how do I use it? Just use the "name" in aigenerate or aiclassify like you see in the first example!

You can inspect any template by "rendering" it (this is what the LLM will see):

julia
julia> AITemplate(:JudgeIsItTrue) |> PromptingTools.render

See also: save_template, load_template, load_templates! for more advanced use cases (and the corresponding script in examples/ folder)

source


# PromptingTools.AITemplateMetadataType.

Helper for easy searching and reviewing of templates. Defined on loading of each template.

source


# PromptingTools.AbstractPromptSchemaType.

Defines different prompting styles based on the model training and fine-tuning.

source


# PromptingTools.AbstractToolType.
julia
AbstractTool

Abstract type for all tool types.

Required fields:

source


', 32)), + _cache[47] || (_cache[47] = createStaticVNode('

Reference

# PromptingTools.ALLOWED_PREFERENCESConstant.

Keys that are allowed to be set via set_preferences!

source


# PromptingTools.ALTERNATIVE_GENERATION_COSTSConstant.
julia
ALTERNATIVE_GENERATION_COSTS

Tracker of alternative costing models, eg, for image generation (dall-e-3), the cost is driven by quality/size.

source


# PromptingTools.ANTHROPIC_TOOL_PROMPTConstant.

Simple template to add to the System Message when doing data extraction with Anthropic models.

It has 3 placeholders: tool_name, tool_description and tool_parameters that are filled with the tool's name, description and parameters. Source: https://docs.anthropic.com/claude/docs/functions-external-tools

source


# PromptingTools.CONV_HISTORYConstant.
julia
CONV_HISTORY

Tracks the most recent conversations through the ai_str macros.

Preference available: MAX_HISTORY_LENGTH, which sets how many last messages should be remembered.

See also: push_conversation!, resize_conversation!

source


# PromptingTools.MODEL_ALIASESConstant.
julia
MODEL_ALIASES

A dictionary of model aliases. Aliases are used to refer to models by their aliases instead of their full names to make it more convenient to use them.

Accessing the aliases

PromptingTools.MODEL_ALIASES["gpt3"]

Register a new model alias

julia
PromptingTools.MODEL_ALIASES["gpt3"] = "gpt-3.5-turbo"

source


# PromptingTools.MODEL_REGISTRYConstant.
julia
MODEL_REGISTRY

A store of available model names and their specs (ie, name, costs per token, etc.)

Accessing the registry

You can use both the alias name or the full name to access the model spec:

PromptingTools.MODEL_REGISTRY["gpt-3.5-turbo"]

Registering a new model

julia
register_model!(\n    name = "gpt-3.5-turbo",\n    schema = :OpenAISchema,\n    cost_of_token_prompt = 0.0015,\n    cost_of_token_generation = 0.002,\n    description = "GPT-3.5 Turbo is a 175B parameter model and a common default on the OpenAI API.")

Registering a model alias

julia
PromptingTools.MODEL_ALIASES["gpt3"] = "gpt-3.5-turbo"

source


# PromptingTools.OPENAI_TOKEN_IDS_GPT35_GPT4Constant.

Token IDs for GPT3.5 and GPT4 from https://platform.openai.com/tokenizer

source


# PromptingTools.PREFERENCESConstant.
julia
PREFERENCES

You can set preferences for PromptingTools by setting environment variables or by using set_preferences!. It will create a LocalPreferences.toml file in your current directory and will reload your preferences from there.

Check your preferences by calling get_preferences(key::String).
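
For illustration, a minimal sketch of reading and writing a preference ("MODEL_CHAT" is just one of the allowed keys; the value shown is an arbitrary example):

julia
using PromptingTools\nPromptingTools.set_preferences!("MODEL_CHAT" => "gpt-4o-mini")\nPromptingTools.get_preferences("MODEL_CHAT")  # returns the currently stored value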

Available Preferences (for set_preferences!)

At the moment it is not possible to persist changes to MODEL_REGISTRY across sessions. Define your register_model!() calls in your startup.jl file to make them available across sessions or put them at the top of your script.

Available ENV Variables

Preferences.jl takes priority over ENV variables, so if you set a preference, it will take precedence over the ENV variable.

WARNING: NEVER EVER sync your LocalPreferences.toml file! It contains your API key and other sensitive information!!!

source


# PromptingTools.RESERVED_KWARGSConstant.

The following keywords are reserved for internal use in the ai* functions and cannot be used as placeholders in the Messages

source


# PromptingTools.AICodeType.
julia
AICode(code::AbstractString; auto_eval::Bool=true, safe_eval::Bool=false, \nskip_unsafe::Bool=false, capture_stdout::Bool=true, verbose::Bool=false,\nprefix::AbstractString="", suffix::AbstractString="", remove_tests::Bool=false, execution_timeout::Int = 60)\n\nAICode(msg::AIMessage; auto_eval::Bool=true, safe_eval::Bool=false, \nskip_unsafe::Bool=false, skip_invalid::Bool=false, capture_stdout::Bool=true,\nverbose::Bool=false, prefix::AbstractString="", suffix::AbstractString="", remove_tests::Bool=false, execution_timeout::Int = 60)

A mutable structure representing a code block (received from the AI model) with automatic parsing, execution, and output/error capturing capabilities.

Upon instantiation with a string, the AICode object automatically runs a code parser and executor (via PromptingTools.eval!()), capturing any standard output (stdout) or errors. This structure is useful for programmatically handling and evaluating Julia code snippets.

See also: PromptingTools.extract_code_blocks, PromptingTools.eval!

Workflow

Properties

Keyword Arguments

Methods

Examples

julia
code = AICode("println(\"Hello, World!\")") # Auto-parses and evaluates the code, capturing output and errors.\nisvalid(code) # Output: true\ncode.stdout # Output: "Hello, World!\n"

We try to evaluate "safely" by default (eg, inside a custom module, to avoid changing user variables). You can avoid that with safe_eval=false:

julia
code = AICode("new_variable = 1"; safe_eval=false)\nisvalid(code) # Output: true\nnew_variable # Output: 1

You can also call AICode directly on an AIMessage, which will extract the Julia code blocks, concatenate them and evaluate them:

julia
msg = aigenerate("In Julia, how do you create a vector of 10 random numbers?")\ncode = AICode(msg)\n# Output: AICode(Success: True, Parsed: True, Evaluated: True, Error Caught: N/A, StdOut: True, Code: 2 Lines)\n\n# show the code\ncode.code |> println\n# Output: \n# numbers = rand(10)\n# numbers = rand(1:100, 10)\n\n# or copy it to the clipboard\ncode.code |> clipboard\n\n# or execute it in the current module (=Main)\neval(code.expression)

source


# PromptingTools.AIMessageType.
julia
AIMessage

A message type for AI-generated text-based responses. Returned by aigenerate, aiclassify, and aiscan functions.
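
For orientation, a minimal usage sketch (assumes an OpenAI API key is configured; content, status, and tokens are among the commonly accessed fields):

julia
msg = aigenerate("Say hi!")\nmsg.content  # the generated text\nmsg.status   # HTTP status code of the API call\nmsg.tokens   # (prompt_tokens, completion_tokens)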

Fields

source


# PromptingTools.AITemplateType.
julia
AITemplate

AITemplate is a template for a conversation prompt. This type is merely a container for the template name, which is resolved into a set of messages (=prompt) by render.

Naming Convention

Examples

Save time by re-using pre-made templates, just fill in the placeholders with the keyword arguments:

julia
msg = aigenerate(:JuliaExpertAsk; ask = "How do I add packages?")

The above is equivalent to a more verbose version that explicitly uses the dispatch on AITemplate:

julia
msg = aigenerate(AITemplate(:JuliaExpertAsk); ask = "How do I add packages?")

Find available templates with aitemplates:

julia
tmps = aitemplates("JuliaExpertAsk")\n# Will surface one specific template\n# 1-element Vector{AITemplateMetadata}:\n# PromptingTools.AITemplateMetadata\n#   name: Symbol JuliaExpertAsk\n#   description: String "For asking questions about Julia language. Placeholders: `ask`"\n#   version: String "1"\n#   wordcount: Int64 237\n#   variables: Array{Symbol}((1,))\n#   system_preview: String "You are a world-class Julia language programmer with the knowledge of the latest syntax. Your commun"\n#   user_preview: String "# Question\n\n{{ask}}"\n#   source: String ""

The above gives you a good idea of what the template is about, what placeholders are available, and how much it would cost to use it (=wordcount).

Search for all Julia-related templates:

julia
tmps = aitemplates("Julia")\n# 2-element Vector{AITemplateMetadata}... -> more to come later!

If you are on VSCode, you can leverage nice tabular display with vscodedisplay:

julia
using DataFrames\ntmps = aitemplates("Julia") |> DataFrame |> vscodedisplay

I have my selected template, how do I use it? Just use the "name" in aigenerate or aiclassify like you see in the first example!

You can inspect any template by "rendering" it (this is what the LLM will see):

julia
julia> AITemplate(:JudgeIsItTrue) |> PromptingTools.render

See also: save_template, load_template, load_templates! for more advanced use cases (and the corresponding script in examples/ folder)

source


# PromptingTools.AITemplateMetadataType.

Helper for easy searching and reviewing of templates. Defined on loading of each template.

source


# PromptingTools.AbstractPromptSchemaType.

Defines different prompting styles based on the model training and fine-tuning.

source


# PromptingTools.AbstractToolType.
julia
AbstractTool

Abstract type for all tool types.

Required fields:

source


', 32)), createBaseVNode("div", _hoisted_1, [ _cache[4] || (_cache[4] = createStaticVNode('# PromptingTools.AnthropicSchemaType.
julia
AnthropicSchema <: AbstractAnthropicSchema

AnthropicSchema is the default schema for Anthropic API models (eg, Claude). See more information here.

It uses the following conversation template:

[Dict(role="user",content="..."),Dict(role="assistant",content="...")]

system messages are provided as a keyword argument to the API call.

', 11)), createBaseVNode("p", null, [ @@ -28,13 +28,13 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { ]), _cache[5] || (_cache[5] = createBaseVNode("p", null, [ createBaseVNode("a", { - href: "https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/llm_interface.jl#L327-L341", + href: "https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/llm_interface.jl#L327-L341", target: "_blank", rel: "noreferrer" }, "source") ], -1)) ]), - _cache[48] || (_cache[48] = createStaticVNode('
# PromptingTools.AzureOpenAISchemaType.

AzureOpenAISchema

AzureOpenAISchema() allows user to call Azure OpenAI API. API Reference

Requires two environment variables to be set:

source


# PromptingTools.CerebrasOpenAISchemaType.
julia
CerebrasOpenAISchema

Schema to call the Cerebras API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.ChatMLSchemaType.

ChatMLSchema is used by many open-source chatbots, by OpenAI models (under the hood) and by several models and interfaces (eg, Ollama, vLLM).

You can explore it on tiktokenizer

It uses the following conversation structure:

<im_start>system\n...<im_end>\n<|im_start|>user\n...<|im_end|>\n<|im_start|>assistant\n...<|im_end|>

source


# PromptingTools.CustomOpenAISchemaType.
julia
CustomOpenAISchema

CustomOpenAISchema() allows user to call any OpenAI-compatible API.

All user needs to do is to pass this schema as the first argument and provide the BASE URL of the API to call (api_kwargs.url).

Example

Assumes that we have a local server running at http://127.0.0.1:8081:

julia
api_key = "..."\nprompt = "Say hi!"\nmsg = aigenerate(CustomOpenAISchema(), prompt; model="my_model", api_key, api_kwargs=(; url="http://127.0.0.1:8081"))

source


# PromptingTools.DataMessageType.
julia
DataMessage

A message type for AI-generated data-based responses, ie, different content than text. Returned by the aiextract and aiembed functions.

Fields

source


# PromptingTools.DatabricksOpenAISchemaType.
julia
DatabricksOpenAISchema

DatabricksOpenAISchema() allows user to call Databricks Foundation Model API. API Reference

Requires two environment variables to be set:

source


# PromptingTools.DeepSeekOpenAISchemaType.
julia
DeepSeekOpenAISchema

Schema to call the DeepSeek API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.FireworksOpenAISchemaType.
julia
FireworksOpenAISchema

Schema to call the Fireworks.ai API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.GoogleSchemaType.

Calls Google's Gemini API. See more information here. It's available only for some regions.

source


# PromptingTools.GroqOpenAISchemaType.
julia
GroqOpenAISchema

Schema to call the groq.com API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.ItemsExtractType.

Extract zero, one or more specified items from the provided data.

source


# PromptingTools.LocalServerOpenAISchemaType.
julia
LocalServerOpenAISchema

Designed to be used with local servers. It's automatically called with model alias "local" (see MODEL_REGISTRY).

This schema is a flavor of CustomOpenAISchema with the url key preset by the global preference key LOCAL_SERVER. See ?PREFERENCES for more details on how to change it. It assumes that the server follows OpenAI API conventions (eg, POST /v1/chat/completions).

Note: Llama.cpp (and hence Llama.jl built on top of it) do NOT support embeddings endpoint! You'll get an address error.

Example

Assumes that we have a local server running at http://127.0.0.1:10897/v1 (port and address used by Llama.jl, "v1" at the end is needed for OpenAI endpoint compatibility):

Three ways to call it:

julia
\n# Use @ai_str with "local" alias\nai"Say hi!"local\n\n# model="local"\naigenerate("Say hi!"; model="local")\n\n# Or set schema explicitly\nconst PT = PromptingTools\nmsg = aigenerate(PT.LocalServerOpenAISchema(), "Say hi!")

How to start a LLM local server? You can use run_server function from Llama.jl. Use a separate Julia session.

julia
using Llama\nmodel = "...path..." # see Llama.jl README how to download one\nrun_server(; model)

To change the default port and address:

julia
# For a permanent change, set the preference:\nusing Preferences\nset_preferences!("LOCAL_SERVER"=>"http://127.0.0.1:10897/v1")\n\n# Or if it's a temporary fix, just change the variable `LOCAL_SERVER`:\nconst PT = PromptingTools\nPT.LOCAL_SERVER = "http://127.0.0.1:10897/v1"

source


# PromptingTools.MaybeExtractType.

Extract a result from the provided data, if any, otherwise set the error and message fields.

Arguments

source


# PromptingTools.MistralOpenAISchemaType.
julia
MistralOpenAISchema

MistralOpenAISchema() allows user to call MistralAI API known for mistral and mixtral models.

It's a flavor of CustomOpenAISchema() with a url preset to https://api.mistral.ai.

Most models have been registered, so you don't even have to specify the schema

Example

Let's call mistral-tiny model:

julia
api_key = "..." # can be set via ENV["MISTRAL_API_KEY"] or via our preference system\nmsg = aigenerate("Say hi!"; model="mistral_tiny", api_key)

See ?PREFERENCES for more details on how to set your API key permanently.

source


# PromptingTools.ModelSpecType.
julia
ModelSpec

A struct that contains information about a model, such as its name, schema, cost per token, etc.

Fields

Example

julia
spec = ModelSpec("gpt-3.5-turbo",\n    OpenAISchema(),\n    0.0015,\n    0.002,\n    "GPT-3.5 Turbo is a 175B parameter model and a common default on the OpenAI API.")\n\n# register it\nPromptingTools.register_model!(spec)

But you can also register any model directly via keyword arguments:

julia
PromptingTools.register_model!(\n    name = "gpt-3.5-turbo",\n    schema = OpenAISchema(),\n    cost_of_token_prompt = 0.0015,\n    cost_of_token_generation = 0.002,\n    description = "GPT-3.5 Turbo is a 175B parameter model and a common default on the OpenAI API.")

source


# PromptingTools.NoSchemaType.

Schema that keeps messages (<:AbstractMessage) and does not transform for any specific model. It is used by the first pass of the prompt rendering system (see ?render).

source


# PromptingTools.OllamaManagedSchemaType.

Ollama by default manages different models and their associated prompt schemas when you pass system_prompt and prompt fields to the API.

Warning: It works only for 1 system message and 1 user message, so anything more than that has to be rejected.

If you need to pass more messages / a longer conversational history, you can define the model-specific schema directly and pass your Ollama requests with raw=true, which disables any templating and schema management by Ollama.

source


# PromptingTools.OllamaSchemaType.

OllamaSchema is the default schema for Ollama models.

It uses the following conversation template:

[Dict(role="system",content="..."),Dict(role="user",content="..."),Dict(role="assistant",content="...")]

It's very similar to OpenAISchema, but it appends images differently.

source


# PromptingTools.OpenAISchemaType.

OpenAISchema is the default schema for OpenAI models.

It uses the following conversation template:

[Dict(role="system",content="..."),Dict(role="user",content="..."),Dict(role="assistant",content="...")]

It's recommended to separate sections in your prompt with markdown headers (e.g. `## Answer`).

source


# PromptingTools.OpenRouterOpenAISchemaType.
julia
OpenRouterOpenAISchema

Schema to call the OpenRouter API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.SaverSchemaType.
julia
SaverSchema <: AbstractTracerSchema

SaverSchema is a schema that automatically saves the conversation to the disk. It's useful for debugging and for persistent logging.

It can be composed with any other schema, eg, TracerSchema to save additional metadata.

Set environment variable LOG_DIR to the directory where you want to save the conversation (see ?PREFERENCES). Conversations are named by the hash of the first message in the conversation to naturally group subsequent conversations together.

If you need to provide the logging directory or the file name dynamically, you can provide the following arguments to tracer_kwargs:

To use it automatically, re-register the models you use with the schema wrapped in SaverSchema

See also: meta, unwrap, TracerSchema, initialize_tracer, finalize_tracer

Example

julia
using PromptingTools: TracerSchema, OpenAISchema, SaverSchema\n# This schema will first trace the metadata (change to TraceMessage) and then save the conversation to the disk\n\nwrap_schema = OpenAISchema() |> TracerSchema |> SaverSchema\nconv = aigenerate(wrap_schema,:BlankSystemUser; system="You're a French-speaking assistant!",\n    user="Say hi!", model="gpt-4", api_kwargs=(;temperature=0.1), return_all=true)\n\n# conv is a vector of messages that will be saved to a JSON together with metadata about the template and api_kwargs

If you wanted to enable this automatically for models you use, you can do it like this:

julia
PT.register_model!(; name= "gpt-3.5-turbo", schema=OpenAISchema() |> TracerSchema |> SaverSchema)

Any subsequent calls model="gpt-3.5-turbo" will automatically capture metadata and save the conversation to the disk.

To provide logging file path explicitly, use the tracer_kwargs:

julia
conv = aigenerate(wrap_schema,:BlankSystemUser; system="You're a French-speaking assistant!",\n    user="Say hi!", model="gpt-4", api_kwargs=(;temperature=0.1), return_all=true,\n    tracer_kwargs=(; log_file_path="my_logs/my_log.json"))

source


# PromptingTools.ShareGPTSchemaType.
julia
ShareGPTSchema <: AbstractShareGPTSchema

Frequently used schema for finetuning LLMs. Conversations are recorded as a vector of dicts with keys from and value (similar to OpenAI).

source


# PromptingTools.StreamCallbackType.
julia
StreamCallback

Simplest callback for streaming message, which just prints the content to the output stream defined by out. When streaming is over, it builds the response body from the chunks and returns it as if it was a normal response from the API.

For more complex use cases, you can define your own callback. See the interface description below for more information.

Fields

Interface

streamed_request! is composed of:

If you want to implement your own callback, you can create your own methods for the interface functions. Eg, if you want to print the streamed chunks into some specialized sink or Channel, you could define a simple method just for print_content.

Example

julia
using PromptingTools\nconst PT = PromptingTools\n\n# Simplest usage, just provide where to steam the text (we build the callback for you)\nmsg = aigenerate("Count from 1 to 100."; streamcallback = stdout)\n\nstreamcallback = PT.StreamCallback() # record all chunks\nmsg = aigenerate("Count from 1 to 100."; streamcallback)\n# this allows you to inspect each chunk with `streamcallback.chunks`\n\n# Get verbose output with details of each chunk for debugging\nstreamcallback = PT.StreamCallback(; verbose=true, throw_on_error=true)\nmsg = aigenerate("Count from 1 to 10."; streamcallback)

Note: If you provide a StreamCallback object to aigenerate, we will configure it and necessary api_kwargs via configure_callback! unless you specify the flavor field. If you provide a StreamCallback with a specific flavor, we leave all configuration to the user (eg, you need to provide the correct api_kwargs).

source


# PromptingTools.StreamChunkType.
julia
StreamChunk

A chunk of streaming data. A message is composed of multiple chunks.

Fields

source


# PromptingTools.TestEchoAnthropicSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoGoogleSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoOllamaManagedSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoOllamaSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoOpenAISchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TogetherOpenAISchemaType.
julia
TogetherOpenAISchema

Schema to call the Together.ai API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.ToolType.
julia
Tool

A tool that can be sent to an LLM for execution ("function calling").

Arguments

See also: AbstractTool, tool_call_signature

source


# PromptingTools.ToolMethod.
julia
Tool(callable::Union{Function, Type, Method}; kwargs...)

Create a Tool from a callable object (function, type, or method).

Arguments

Returns

Examples

julia
# Create a tool from a function\ntool = Tool(my_function)\n\n# Create a tool from a type\ntool = Tool(MyStruct)

source


# PromptingTools.TracerMessageType.
julia
TracerMessage{T <: Union{AbstractChatMessage, AbstractDataMessage}} <: AbstractTracerMessage

A mutable wrapper message designed for tracing the flow of messages through the system, allowing for iterative updates and providing additional metadata for observability.

Fields

This structure is particularly useful for debugging, monitoring, and auditing the flow of messages in systems that involve complex interactions or asynchronous processing.

All fields are optional besides the object.

Useful methods: pprint (pretty prints the underlying message), unwrap (to get the object out of tracer), align_tracer! (to set all shared IDs in a vector of tracers to the same), istracermessage to check if given message is an AbstractTracerMessage

Example

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())\nmsg = aigenerate(wrap_schema, "Say hi!"; model = "gpt4t")\nmsg # isa TracerMessage\nmsg.content # access content like if it was the message

source


# PromptingTools.TracerMessageLikeType.
julia
TracerMessageLike{T <: Any} <: AbstractTracer

A mutable structure designed for general-purpose tracing within the system, capable of handling any type of object that is part of the AI Conversation. It provides a flexible way to track and annotate objects as they move through different parts of the system, facilitating debugging, monitoring, and auditing.

Fields

This structure is particularly useful for systems that involve complex interactions or asynchronous processing, where tracking the flow and transformation of objects is crucial.

All fields are optional besides the object.

source


# PromptingTools.TracerSchemaType.
julia
TracerSchema <: AbstractTracerSchema

A schema designed to wrap another schema, enabling pre- and post-execution callbacks for tracing and additional functionalities. This type is specifically utilized within the TracerMessage type to trace the execution flow, facilitating observability and debugging in complex conversational AI systems.

The TracerSchema acts as a middleware, allowing developers to insert custom logic before and after the execution of the primary schema's functionality. This can include logging, performance measurement, or any other form of tracing required to understand or improve the execution flow.

TracerSchema automatically wraps messages in TracerMessage type, which has several important fields, eg,

See also: meta, unwrap, SaverSchema, initialize_tracer, finalize_tracer

Example

julia
wrap_schema = TracerSchema(OpenAISchema())\nmsg = aigenerate(wrap_schema, "Say hi!"; model="gpt-4")\n# output type should be TracerMessage\nmsg isa TracerMessage

You can define your own tracer schema and the corresponding methods: initialize_tracer, finalize_tracer. See src/llm_tracer.jl

source


# PromptingTools.UserMessageType.
julia
UserMessage

A message type for user-generated text-based responses. Consumed by ai* functions to generate responses.

Fields

source


# PromptingTools.UserMessageWithImagesType.
julia
UserMessageWithImages

A message type for user-generated text-based responses with images. Consumed by ai* functions to generate responses.

Fields

source


# PromptingTools.UserMessageWithImagesMethod.

Construct UserMessageWithImages with 1 or more images. Images can be either URLs or local paths.

source


# PromptingTools.X123Type.

With docstring

source


# OpenAI.create_chatMethod.
julia
OpenAI.create_chat(schema::CustomOpenAISchema,\n    api_key::AbstractString,\n    model::AbstractString,\n    conversation;\n    http_kwargs::NamedTuple = NamedTuple(),\n    streamcallback::Any = nothing,\n    url::String = "http://localhost:8080",\n    kwargs...)

Dispatch to the OpenAI.create_chat function, for any OpenAI-compatible API.

It expects url keyword argument. Provide it to the aigenerate function via api_kwargs=(; url="my-url")

It will forward your query to the "chat/completions" endpoint of the base URL that you provided (=url).

source


# OpenAI.create_chatMethod.
julia
OpenAI.create_chat(schema::LocalServerOpenAISchema,\n    api_key::AbstractString,\n    model::AbstractString,\n    conversation;\n    url::String = "http://localhost:8080",\n    kwargs...)

Dispatch to the OpenAI.create_chat function, but with the LocalServer API parameters, ie, it defaults to the url specified by the LOCAL_SERVER preference. See ?PREFERENCES.

source


# OpenAI.create_chatMethod.
julia
OpenAI.create_chat(schema::MistralOpenAISchema,

api_key::AbstractString, model::AbstractString, conversation; url::String="https://api.mistral.ai/v1", kwargs...)

Dispatch to the OpenAI.create_chat function, but with the MistralAI API parameters.

It tries to access the MISTRALAI_API_KEY ENV variable, but you can also provide it via the api_key keyword argument.

source


# PromptingTools.aiclassifyMethod.
julia
aiclassify(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiclassify call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

source


', 87)), + _cache[48] || (_cache[48] = createStaticVNode('
# PromptingTools.AzureOpenAISchemaType.

AzureOpenAISchema

AzureOpenAISchema() allows user to call Azure OpenAI API. API Reference

Requires two environment variables to be set:

source


# PromptingTools.CerebrasOpenAISchemaType.
julia
CerebrasOpenAISchema

Schema to call the Cerebras API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.ChatMLSchemaType.

ChatMLSchema is used by many open-source chatbots, by OpenAI models (under the hood) and by several models and interfaces (eg, Ollama, vLLM).

You can explore it on tiktokenizer

It uses the following conversation structure:

<im_start>system\n...<im_end>\n<|im_start|>user\n...<|im_end|>\n<|im_start|>assistant\n...<|im_end|>

source


# PromptingTools.CustomOpenAISchemaType.
julia
CustomOpenAISchema

CustomOpenAISchema() allows user to call any OpenAI-compatible API.

All user needs to do is to pass this schema as the first argument and provide the BASE URL of the API to call (api_kwargs.url).

Example

Assumes that we have a local server running at http://127.0.0.1:8081:

julia
api_key = "..."\nprompt = "Say hi!"\nmsg = aigenerate(CustomOpenAISchema(), prompt; model="my_model", api_key, api_kwargs=(; url="http://127.0.0.1:8081"))

source


# PromptingTools.DataMessageType.
julia
DataMessage

A message type for AI-generated data-based responses, ie, different content than text. Returned by the aiextract and aiembed functions.
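
For orientation, a minimal sketch showing that the payload is data rather than text (assumes an OpenAI API key is configured):

julia
msg = aiembed("Hello World")  # returns a DataMessage\nmsg.content                   # the embedding vector (numeric data, not text)\nmsg.tokens                    # token usage reported by the API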

Fields

source


# PromptingTools.DatabricksOpenAISchemaType.
julia
DatabricksOpenAISchema

DatabricksOpenAISchema() allows user to call Databricks Foundation Model API. API Reference

Requires two environment variables to be set:

source


# PromptingTools.DeepSeekOpenAISchemaType.
julia
DeepSeekOpenAISchema

Schema to call the DeepSeek API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.FireworksOpenAISchemaType.
julia
FireworksOpenAISchema

Schema to call the Fireworks.ai API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.GoogleSchemaType.

Calls Google's Gemini API. See more information here. It's available only for some regions.

source


# PromptingTools.GroqOpenAISchemaType.
julia
GroqOpenAISchema

Schema to call the groq.com API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.ItemsExtractType.

Extract zero, one or more specified items from the provided data.

source


# PromptingTools.LocalServerOpenAISchemaType.
julia
LocalServerOpenAISchema

Designed to be used with local servers. It's automatically called with model alias "local" (see MODEL_REGISTRY).

This schema is a flavor of CustomOpenAISchema with the url key preset by the global preference key LOCAL_SERVER. See ?PREFERENCES for more details on how to change it. It assumes that the server follows OpenAI API conventions (eg, POST /v1/chat/completions).

Note: Llama.cpp (and hence Llama.jl built on top of it) do NOT support embeddings endpoint! You'll get an address error.

Example

Assumes that we have a local server running at http://127.0.0.1:10897/v1 (port and address used by Llama.jl, "v1" at the end is needed for OpenAI endpoint compatibility):

Three ways to call it:

julia
\n# Use @ai_str with "local" alias\nai"Say hi!"local\n\n# model="local"\naigenerate("Say hi!"; model="local")\n\n# Or set schema explicitly\nconst PT = PromptingTools\nmsg = aigenerate(PT.LocalServerOpenAISchema(), "Say hi!")

How to start a LLM local server? You can use run_server function from Llama.jl. Use a separate Julia session.

julia
using Llama\nmodel = "...path..." # see Llama.jl README how to download one\nrun_server(; model)

To change the default port and address:

julia
# For a permanent change, set the preference:\nusing Preferences\nset_preferences!("LOCAL_SERVER"=>"http://127.0.0.1:10897/v1")\n\n# Or if it's a temporary fix, just change the variable `LOCAL_SERVER`:\nconst PT = PromptingTools\nPT.LOCAL_SERVER = "http://127.0.0.1:10897/v1"

source


# PromptingTools.MaybeExtractType.

Extract a result from the provided data, if any, otherwise set the error and message fields.

Arguments

source


# PromptingTools.MistralOpenAISchemaType.
julia
MistralOpenAISchema

MistralOpenAISchema() allows user to call MistralAI API known for mistral and mixtral models.

It's a flavor of CustomOpenAISchema() with a url preset to https://api.mistral.ai.

Most models have been registered, so you don't even have to specify the schema

Example

Let's call mistral-tiny model:

julia
api_key = "..." # can be set via ENV["MISTRAL_API_KEY"] or via our preference system\nmsg = aigenerate("Say hi!"; model="mistral_tiny", api_key)

See ?PREFERENCES for more details on how to set your API key permanently.

source


# PromptingTools.ModelSpecType.
julia
ModelSpec

A struct that contains information about a model, such as its name, schema, cost per token, etc.

Fields

Example

julia
spec = ModelSpec("gpt-3.5-turbo",\n    OpenAISchema(),\n    0.0015,\n    0.002,\n    "GPT-3.5 Turbo is a 175B parameter model and a common default on the OpenAI API.")\n\n# register it\nPromptingTools.register_model!(spec)

But you can also register any model directly via keyword arguments:

julia
PromptingTools.register_model!(\n    name = "gpt-3.5-turbo",\n    schema = OpenAISchema(),\n    cost_of_token_prompt = 0.0015,\n    cost_of_token_generation = 0.002,\n    description = "GPT-3.5 Turbo is a 175B parameter model and a common default on the OpenAI API.")

source


# PromptingTools.NoSchemaType.

Schema that keeps messages (<:AbstractMessage) and does not transform for any specific model. It is used by the first pass of the prompt rendering system (see ?render).

source


# PromptingTools.OllamaManagedSchemaType.

Ollama by default manages different models and their associated prompt schemas when you pass system_prompt and prompt fields to the API.

Warning: It works only for 1 system message and 1 user message, so anything more than that has to be rejected.

If you need to pass more messages / a longer conversational history, you can define the model-specific schema directly and pass your Ollama requests with raw=true, which disables any templating and schema management by Ollama.

source


# PromptingTools.OllamaSchemaType.

OllamaSchema is the default schema for Ollama models.

It uses the following conversation template:

[Dict(role="system",content="..."),Dict(role="user",content="..."),Dict(role="assistant",content="...")]

It's very similar to OpenAISchema, but it appends images differently.

source


# PromptingTools.OpenAISchemaType.

OpenAISchema is the default schema for OpenAI models.

It uses the following conversation template:

[Dict(role="system",content="..."),Dict(role="user",content="..."),Dict(role="assistant",content="...")]

It's recommended to separate sections in your prompt with markdown headers (e.g. `## Answer`).

source


# PromptingTools.OpenRouterOpenAISchemaType.
julia
OpenRouterOpenAISchema

Schema to call the OpenRouter API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.SaverSchemaType.
julia
SaverSchema <: AbstractTracerSchema

SaverSchema is a schema that automatically saves the conversation to the disk. It's useful for debugging and for persistent logging.

It can be composed with any other schema, eg, TracerSchema to save additional metadata.

Set environment variable LOG_DIR to the directory where you want to save the conversation (see ?PREFERENCES). Conversations are named by the hash of the first message in the conversation to naturally group subsequent conversations together.

If you need to provide the logging directory or the file name dynamically, you can provide the following arguments to tracer_kwargs:

To use it automatically, re-register the models you use with the schema wrapped in SaverSchema

See also: meta, unwrap, TracerSchema, initialize_tracer, finalize_tracer

Example

julia
using PromptingTools: TracerSchema, OpenAISchema, SaverSchema\n# This schema will first trace the metadata (change to TraceMessage) and then save the conversation to the disk\n\nwrap_schema = OpenAISchema() |> TracerSchema |> SaverSchema\nconv = aigenerate(wrap_schema,:BlankSystemUser; system="You're a French-speaking assistant!",\n    user="Say hi!", model="gpt-4", api_kwargs=(;temperature=0.1), return_all=true)\n\n# conv is a vector of messages that will be saved to a JSON together with metadata about the template and api_kwargs

If you wanted to enable this automatically for models you use, you can do it like this:

julia
PT.register_model!(; name= "gpt-3.5-turbo", schema=OpenAISchema() |> TracerSchema |> SaverSchema)

Any subsequent calls model="gpt-3.5-turbo" will automatically capture metadata and save the conversation to the disk.

To provide logging file path explicitly, use the tracer_kwargs:

julia
conv = aigenerate(wrap_schema,:BlankSystemUser; system="You're a French-speaking assistant!",\n    user="Say hi!", model="gpt-4", api_kwargs=(;temperature=0.1), return_all=true,\n    tracer_kwargs=(; log_file_path="my_logs/my_log.json"))

source


# PromptingTools.ShareGPTSchemaType.
julia
ShareGPTSchema <: AbstractShareGPTSchema

Frequently used schema for finetuning LLMs. Conversations are recorded as a vector of dicts with keys from and value (similar to OpenAI).
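
For context, a single conversation in this format looks roughly like the sketch below (illustrative only; the "from" role labels follow the common ShareGPT convention and are an assumption here):

julia
[Dict("from" => "human", "value" => "Say hi!"),\n Dict("from" => "gpt", "value" => "Hi there!")]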

source


# PromptingTools.StreamCallbackType.
julia
StreamCallback

Simplest callback for streaming message, which just prints the content to the output stream defined by out. When streaming is over, it builds the response body from the chunks and returns it as if it was a normal response from the API.

For more complex use cases, you can define your own callback. See the interface description below for more information.

Fields

Interface

streamed_request! is composed of:

If you want to implement your own callback, you can create your own methods for the interface functions. Eg, if you want to print the streamed chunks into some specialized sink or Channel, you could define a simple method just for print_content.

Example

julia
using PromptingTools\nconst PT = PromptingTools\n\n# Simplest usage, just provide where to steam the text (we build the callback for you)\nmsg = aigenerate("Count from 1 to 100."; streamcallback = stdout)\n\nstreamcallback = PT.StreamCallback() # record all chunks\nmsg = aigenerate("Count from 1 to 100."; streamcallback)\n# this allows you to inspect each chunk with `streamcallback.chunks`\n\n# Get verbose output with details of each chunk for debugging\nstreamcallback = PT.StreamCallback(; verbose=true, throw_on_error=true)\nmsg = aigenerate("Count from 1 to 10."; streamcallback)

Note: If you provide a StreamCallback object to aigenerate, we will configure it and necessary api_kwargs via configure_callback! unless you specify the flavor field. If you provide a StreamCallback with a specific flavor, we leave all configuration to the user (eg, you need to provide the correct api_kwargs).

source


# PromptingTools.StreamChunkType.
julia
StreamChunk

A chunk of streaming data. A message is composed of multiple chunks.

Fields

source


# PromptingTools.TestEchoAnthropicSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoGoogleSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoOllamaManagedSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoOllamaSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoOpenAISchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TogetherOpenAISchemaType.
julia
TogetherOpenAISchema

Schema to call the Together.ai API.

Links:

Requires one environment variable to be set:

source


# PromptingTools.ToolType.
julia
Tool

A tool that can be sent to an LLM for execution ("function calling").

Arguments

See also: AbstractTool, tool_call_signature

source


# PromptingTools.ToolMethod.
julia
Tool(callable::Union{Function, Type, Method}; kwargs...)

Create a Tool from a callable object (function, type, or method).

Arguments

Returns

Examples

julia
# Create a tool from a function\ntool = Tool(my_function)\n\n# Create a tool from a type\ntool = Tool(MyStruct)

source


# PromptingTools.TracerMessageType.
julia
TracerMessage{T <: Union{AbstractChatMessage, AbstractDataMessage}} <: AbstractTracerMessage

A mutable wrapper message designed for tracing the flow of messages through the system, allowing for iterative updates and providing additional metadata for observability.

Fields

This structure is particularly useful for debugging, monitoring, and auditing the flow of messages in systems that involve complex interactions or asynchronous processing.

All fields are optional besides the object.

Useful methods: pprint (pretty prints the underlying message), unwrap (to get the object out of tracer), align_tracer! (to set all shared IDs in a vector of tracers to the same), istracermessage to check if given message is an AbstractTracerMessage

Example

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())\nmsg = aigenerate(wrap_schema, "Say hi!"; model = "gpt4t")\nmsg # isa TracerMessage\nmsg.content # access content like if it was the message

source


# PromptingTools.TracerMessageLikeType.
julia
TracerMessageLike{T <: Any} <: AbstractTracer

A mutable structure designed for general-purpose tracing within the system, capable of handling any type of object that is part of the AI Conversation. It provides a flexible way to track and annotate objects as they move through different parts of the system, facilitating debugging, monitoring, and auditing.

Fields

This structure is particularly useful for systems that involve complex interactions or asynchronous processing, where tracking the flow and transformation of objects is crucial.

All fields are optional besides the object.

source


# PromptingTools.TracerSchemaType.
julia
TracerSchema <: AbstractTracerSchema

A schema designed to wrap another schema, enabling pre- and post-execution callbacks for tracing and additional functionalities. This type is specifically utilized within the TracerMessage type to trace the execution flow, facilitating observability and debugging in complex conversational AI systems.

The TracerSchema acts as a middleware, allowing developers to insert custom logic before and after the execution of the primary schema's functionality. This can include logging, performance measurement, or any other form of tracing required to understand or improve the execution flow.

TracerSchema automatically wraps messages in TracerMessage type, which has several important fields, eg,

See also: meta, unwrap, SaverSchema, initialize_tracer, finalize_tracer

Example

julia
wrap_schema = TracerSchema(OpenAISchema())\nmsg = aigenerate(wrap_schema, "Say hi!"; model="gpt-4")\n# output type should be TracerMessage\nmsg isa TracerMessage

You can define your own tracer schema and the corresponding methods: initialize_tracer, finalize_tracer. See src/llm_tracer.jl

source


# PromptingTools.UserMessageType.
julia
UserMessage

A message type for user-generated text-based responses. Consumed by ai* functions to generate responses.
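
For illustration, a minimal sketch of passing explicit messages to aigenerate (assumes an OpenAI API key is configured):

julia
using PromptingTools\nconst PT = PromptingTools\nconversation = [PT.SystemMessage("You are a terse assistant."),\n    PT.UserMessage("What is 2 + 2?")]\nmsg = aigenerate(conversation)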

Fields

source


# PromptingTools.UserMessageWithImagesType.
julia
UserMessageWithImages

A message type for user-generated text-based responses with images. Consumed by ai* functions to generate responses.

Fields

source


# PromptingTools.UserMessageWithImagesMethod.

Construct UserMessageWithImages with 1 or more images. Images can be either URLs or local paths.

source


# PromptingTools.X123Type.

With docstring

source


# OpenAI.create_chatMethod.
julia
OpenAI.create_chat(schema::CustomOpenAISchema,\n    api_key::AbstractString,\n    model::AbstractString,\n    conversation;\n    http_kwargs::NamedTuple = NamedTuple(),\n    streamcallback::Any = nothing,\n    url::String = "http://localhost:8080",\n    kwargs...)

Dispatch to the OpenAI.create_chat function, for any OpenAI-compatible API.

It expects url keyword argument. Provide it to the aigenerate function via api_kwargs=(; url="my-url")

It will forward your query to the "chat/completions" endpoint of the base URL that you provided (=url).
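
For illustration, the end-to-end call from the user's side (mirrors the CustomOpenAISchema example above; the model name and URL are placeholders):

julia
msg = aigenerate(CustomOpenAISchema(), "Say hi!";\n    model = "my_model", api_key = "...",\n    api_kwargs = (; url = "http://localhost:8080"))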

source


# OpenAI.create_chatMethod.
julia
OpenAI.create_chat(schema::LocalServerOpenAISchema,\n    api_key::AbstractString,\n    model::AbstractString,\n    conversation;\n    url::String = "http://localhost:8080",\n    kwargs...)

Dispatch to the OpenAI.create_chat function, but with the LocalServer API parameters, ie, it defaults to the url specified by the LOCAL_SERVER preference. See ?PREFERENCES.

source


# OpenAI.create_chatMethod.
julia
OpenAI.create_chat(schema::MistralOpenAISchema,

api_key::AbstractString, model::AbstractString, conversation; url::String="https://api.mistral.ai/v1", kwargs...)

Dispatch to the OpenAI.create_chat function, but with the MistralAI API parameters.

It tries to access the MISTRALAI_API_KEY ENV variable, but you can also provide it via the api_key keyword argument.

source


# PromptingTools.aiclassifyMethod.
julia
aiclassify(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiclassify call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

source


', 87)), createBaseVNode("div", _hoisted_2, [ _cache[10] || (_cache[10] = createStaticVNode('# PromptingTools.aiclassifyMethod.
julia
aiclassify(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;\n    choices::AbstractVector{T} = ["true", "false", "unknown"],\n    model::AbstractString = MODEL_CHAT,\n    api_kwargs::NamedTuple = NamedTuple(),\n    token_ids_map::Union{Nothing, Dict{<:AbstractString, <:Integer}} = nothing,\n    kwargs...) where {T <: Union{AbstractString, Tuple{<:AbstractString, <:AbstractString}}}

Classifies the given prompt/statement into an arbitrary list of choices, which can be provided either as plain choices (a vector of strings) or as choices with descriptions (a vector of tuples, ie, ("choice","description")).

It's a quick and easy option for "routing" and similar use cases: you can classify into an arbitrary list of categories (including with descriptions), and because it exploits the logit bias trick, the model outputs only 1 token.

', 9)), createBaseVNode("p", null, [ @@ -44,9 +44,9 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { createBaseVNode("code", null, toDisplayString(_ctx.choices), 1), _cache[9] || (_cache[9] = createTextVNode(") that will be replaced with the encoded choices")) ]), - _cache[11] || (_cache[11] = createStaticVNode('

Choices are rewritten into an enumerated list and mapped to a few known OpenAI tokens (maximum of 40 choices supported). The mapping of token IDs for GPT3.5/4 is saved in the variable OPENAI_TOKEN_IDS.

It uses Logit bias trick and limits the output to 1 token to force the model to output only true/false/unknown. Credit for the idea goes to AAAzzam.

Arguments

Example

Given a user input, pick one of the two provided categories:

julia
choices = ["animal", "plant"]\ninput = "Palm tree"\naiclassify(:InputClassifier; choices, input)

Choices with descriptions provided as tuples:

julia
choices = [("A", "any animal or creature"), ("P", "any plant or tree"), ("O", "anything else")]\n\n# try the below inputs:\ninput = "spider" # -> returns "A" for any animal or creature\ninput = "daphodil" # -> returns "P" for any plant or tree\ninput = "castle" # -> returns "O" for everything else\naiclassify(:InputClassifier; choices, input)

You could also use this function for routing questions to different endpoints (notice the different template and placeholder used), eg,

julia
choices = [("A", "any question about animal or creature"), ("P", "any question about plant or tree"), ("O", "anything else")]\nquestion = "how many spiders are there?"\nmsg = aiclassify(:QuestionRouter; choices, question)\n# "A"

You can still use a simple true/false classification:

julia
aiclassify("Is two plus two four?") # true\naiclassify("Is two plus three a vegetable on Mars?") # false

aiclassify returns only true/false/unknown. It's easy to get the proper Bool output type out with tryparse, eg,

julia
tryparse(Bool, aiclassify("Is two plus two four?")) isa Bool # true

Output of type Nothing marks that the model couldn't classify the statement as true/false.

Ideally, we would like to re-use some helpful system prompt to get more accurate responses. For this reason, we have templates, eg, :JudgeIsItTrue. By specifying the template, we can provide our statement as the expected variable (it in this case). See that the model now correctly classifies the statement as "unknown".

julia
aiclassify(:JudgeIsItTrue; it = "Is two plus three a vegetable on Mars?") # unknown

For better results, use higher quality models like gpt4, eg,

julia
aiclassify(:JudgeIsItTrue;\n    it = "If I had two apples and I got three more, I have five apples now.",\n    model = "gpt4") # true

source

', 21)) + _cache[11] || (_cache[11] = createStaticVNode('

Choices are rewritten into an enumerated list and mapped to a few known OpenAI tokens (maximum of 40 choices supported). The mapping of token IDs for GPT3.5/4 is saved in the variable OPENAI_TOKEN_IDS.

It uses Logit bias trick and limits the output to 1 token to force the model to output only true/false/unknown. Credit for the idea goes to AAAzzam.

Arguments

Example

Given a user input, pick one of the two provided categories:

julia
choices = ["animal", "plant"]\ninput = "Palm tree"\naiclassify(:InputClassifier; choices, input)

Choices with descriptions provided as tuples:

julia
choices = [("A", "any animal or creature"), ("P", "any plant or tree"), ("O", "anything else")]\n\n# try the below inputs:\ninput = "spider" # -> returns "A" for any animal or creature\ninput = "daphodil" # -> returns "P" for any plant or tree\ninput = "castle" # -> returns "O" for everything else\naiclassify(:InputClassifier; choices, input)

You could also use this function for routing questions to different endpoints (notice the different template and placeholder used), eg,

julia
choices = [("A", "any question about animal or creature"), ("P", "any question about plant or tree"), ("O", "anything else")]\nquestion = "how many spiders are there?"\nmsg = aiclassify(:QuestionRouter; choices, question)\n# "A"

You can still use a simple true/false classification:

julia
aiclassify("Is two plus two four?") # true\naiclassify("Is two plus three a vegetable on Mars?") # false

aiclassify returns only true/false/unknown. It's easy to get the proper Bool output type out with tryparse, eg,

julia
tryparse(Bool, aiclassify("Is two plus two four?")) isa Bool # true

Output of type Nothing marks that the model couldn't classify the statement as true/false.

Ideally, we would like to re-use some helpful system prompt to get more accurate responses. For this reason, we have templates, eg, :JudgeIsItTrue. By specifying the template, we can provide our statement as the expected variable (it in this case). See that the model now correctly classifies the statement as "unknown".

julia
aiclassify(:JudgeIsItTrue; it = "Is two plus three a vegetable on Mars?") # unknown

For better results, use higher quality models like gpt4, eg,

julia
aiclassify(:JudgeIsItTrue;\n    it = "If I had two apples and I got three more, I have five apples now.",\n    model = "gpt4") # true

source

', 21)) ]), - _cache[49] || (_cache[49] = createStaticVNode('
# PromptingTools.aiembedFunction.
julia
aiembed(tracer_schema::AbstractTracerSchema,\n    doc_or_docs::Union{AbstractString, AbstractVector{<:AbstractString}}, postprocess::Function = identity;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiembed call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).
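
For illustration, a minimal sketch of wrapping a schema in a tracer before embedding (assumes an OpenAI API key is configured; unwrap is the accessor mentioned in the tracer docs):

julia
const PT = PromptingTools\nwrap_schema = PT.TracerSchema(PT.OpenAISchema())\nmsg = aiembed(wrap_schema, "Hello World")   # traced call\nPT.unwrap(msg)                              # access the underlying message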

Logic:

source


# PromptingTools.aiembedMethod.
julia
aiembed(prompt_schema::AbstractOllamaManagedSchema,\n        doc_or_docs::Union{AbstractString, AbstractVector{<:AbstractString}},\n        postprocess::F = identity;\n        verbose::Bool = true,\n        api_key::String = "",\n        model::String = MODEL_EMBEDDING,\n        http_kwargs::NamedTuple = (retry_non_idempotent = true,\n                                   retries = 5,\n                                   readtimeout = 120),\n        api_kwargs::NamedTuple = NamedTuple(),\n        kwargs...) where {F <: Function}

The aiembed function generates embeddings for the given input using a specified model and returns a message object containing the embeddings, status, token count, and elapsed time.

Arguments

Returns

Note: Ollama API currently does not return the token count, so it's set to (0,0)

Example

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\n\nmsg = aiembed(schema, "Hello World"; model="openhermes2.5-mistral")\nmsg.content # 4096-element JSON3.Array{Float64...

We can embed multiple strings at once and they will be horizontally concatenated (hcat) into a matrix (ie, each column corresponds to one string)

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\n\nmsg = aiembed(schema, ["Hello World", "How are you?"]; model="openhermes2.5-mistral")\nmsg.content # 4096×2 Matrix{Float64}:

If you plan to calculate the cosine distance between embeddings, you can normalize them first:

julia
const PT = PromptingTools\nusing LinearAlgebra\nschema = PT.OllamaManagedSchema()\n\nmsg = aiembed(schema, ["embed me", "and me too"], LinearAlgebra.normalize; model="openhermes2.5-mistral")\n\n# calculate cosine distance between the two normalized embeddings as a simple dot product\nmsg.content' * msg.content[:, 1] # [1.0, 0.34]

Similarly, you can use the postprocess argument to materialize the data from JSON3.Object by using postprocess = copy

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\n\nmsg = aiembed(schema, "Hello World", copy; model="openhermes2.5-mistral")\nmsg.content # 4096-element Vector{Float64}

source


# PromptingTools.aiembedMethod.
julia
aiembed(prompt_schema::AbstractOpenAISchema,\n        doc_or_docs::Union{AbstractString, AbstractVector{<:AbstractString}},\n        postprocess::F = identity;\n        verbose::Bool = true,\n        api_key::String = OPENAI_API_KEY,\n        model::String = MODEL_EMBEDDING, \n        http_kwargs::NamedTuple = (retry_non_idempotent = true,\n                                   retries = 5,\n                                   readtimeout = 120),\n        api_kwargs::NamedTuple = NamedTuple(),\n        kwargs...) where {F <: Function}

The aiembed function generates embeddings for the given input using a specified model and returns a message object containing the embeddings, status, token count, and elapsed time.

Arguments

Returns

Example

julia
msg = aiembed("Hello World")\nmsg.content # 1536-element JSON3.Array{Float64...

We can embed multiple strings at once and they will be horizontally concatenated (hcat) into a matrix (ie, each column corresponds to one string)

julia
msg = aiembed(["Hello World", "How are you?"])\nmsg.content # 1536×2 Matrix{Float64}:

If you plan to calculate the cosine distance between embeddings, you can normalize them first:

julia
using LinearAlgebra\nmsg = aiembed(["embed me", "and me too"], LinearAlgebra.normalize)\n\n# calculate cosine distance between the two normalized embeddings as a simple dot product\nmsg.content' * msg.content[:, 1] # [1.0, 0.787]

source


# PromptingTools.aiextractMethod.
julia
aiextract(prompt_schema::AbstractAnthropicSchema, prompt::ALLOWED_PROMPT_TYPE;\n    return_type::Union{Type, AbstractTool, Vector},\n    verbose::Bool = true,\n    api_key::String = ANTHROPIC_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = NamedTuple(),\n    cache::Union{Nothing, Symbol} = nothing,\n    kwargs...)

Extract required information (defined by a struct return_type) from the provided prompt by leveraging Anthropic's function calling mode.

This is a perfect solution for extracting structured information from text (eg, extract organization names in news articles, etc.).

Read the best practices here.

It's effectively a light wrapper around aigenerate call, which requires additional keyword argument return_type to be provided and will enforce the model outputs to adhere to it.

Arguments

Note: At the moment, the cache is only allowed for prompt segments over 1024 tokens (in some cases, over 2048 tokens). You'll get an error if you try to cache short prompts.

Returns

If return_all=false (default):

If return_all=true:

See also: tool_call_signature, MaybeExtract, ItemsExtract, aigenerate

Example

Do you want to extract some specific measurements from a text like age, weight and height? You need to define the information you need as a struct (return_type):

"Person's age, height, and weight."\nstruct MyMeasurement\n    age::Int # required\n    height::Union{Int,Nothing} # optional\n    weight::Union{Nothing,Float64} # optional\nend\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall."; model="claudeh", return_type=MyMeasurement)\n# PromptingTools.DataMessage(MyMeasurement)\nmsg.content\n# MyMeasurement(30, 180, 80.0)

The fields that allow Nothing are marked as optional in the schema:

msg = aiextract("James is 30."; model="claudeh", return_type=MyMeasurement)\n# MyMeasurement(30, nothing, nothing)

If there are multiple items you want to extract, define a wrapper struct to get a Vector of MyMeasurement:

struct ManyMeasurements\n    measurements::Vector{MyMeasurement}\nend\n\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall. Then Jack is 19 but really tall - over 190!"; model="claudeh", return_type=ManyMeasurements)\n\nmsg.content.measurements\n# 2-element Vector{MyMeasurement}:\n#  MyMeasurement(30, 180, 80.0)\n#  MyMeasurement(19, 190, nothing)

Or you can use the convenience wrapper ItemsExtract to extract multiple measurements (zero, one or more):

julia
using PromptingTools: ItemsExtract\n\nreturn_type = ItemsExtract{MyMeasurement}\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall. Then Jack is 19 but really tall - over 190!"; model="claudeh", return_type)\n\nmsg.content.items # see the extracted items

Or if you want your extraction to fail gracefully when data isn't found, use MaybeExtract{T} wrapper (this trick is inspired by the Instructor package!):

using PromptingTools: MaybeExtract\n\nreturn_type = MaybeExtract{MyMeasurement}\n# Effectively the same as:\n# struct MaybeExtract{T}\n#     result::Union{T, Nothing} // The result of the extraction\n#     error::Bool // true if no result is found, false otherwise\n#     message::Union{Nothing, String} // Only present if no result is found, should be short and concise\n# end\n\n# If LLM extraction fails, it will return a Dict with `error` and `message` fields instead of the result!\nmsg = aiextract("Extract measurements from the text: I am giraffe"; model="claudeo", return_type)\nmsg.content\n# Output: MaybeExtract{MyMeasurement}(nothing, true, "I'm sorry, but your input of "I am giraffe" does not contain any information about a person's age, height or weight measurements that I can extract. To use this tool, please provide a statement that includes at least the person's age, and optionally their height in inches and weight in pounds. Without that information, I am unable to extract the requested measurements.")

That way, you can handle the error gracefully and get a reason why extraction failed (in msg.content.message).

However, this can fail with weaker models like claudeh, so we can apply one of our prompt templates with an embedded reasoning step:

julia
msg = aiextract(:ExtractDataCoTXML; data="I am giraffe", model="claudeh", return_type)\nmsg.content\n# Output: MaybeExtract{MyMeasurement}(nothing, true, "The provided data does not contain the expected information about a person's age, height, and weight.")

Note that when using a prompt template, we provide data for the extraction as the corresponding placeholder (see aitemplates("extract") for documentation of this template).

Note that the error message refers to a giraffe not being a human, because in our MyMeasurement docstring, we said that it's for people!

Example of using a vector of field names with aiextract

julia
fields = [:location, :temperature => Float64, :condition => String]\nmsg = aiextract("Extract the following information from the text: location, temperature, condition. Text: The weather in New York is sunny and 72.5 degrees Fahrenheit."; \nreturn_type = fields, model="claudeh")

Or simply call aiextract("some text"; return_type = [:reasoning,:answer], model="claudeh") to get Chain of Thought reasoning for the extraction task.

The result will be returned in a newly generated type, which you can check with PromptingTools.isextracted(msg.content) == true to confirm that the data has been extracted correctly.

This new syntax also allows you to provide field-level descriptions, which will be passed to the model.

julia
fields_with_descriptions = [\n    :location,\n    :temperature => Float64,\n    :temperature__description => "Temperature in degrees Fahrenheit",\n    :condition => String,\n    :condition__description => "Current weather condition (e.g., sunny, rainy, cloudy)"\n]\nmsg = aiextract("The weather in New York is sunny and 72.5 degrees Fahrenheit."; return_type = fields_with_descriptions, model="claudeh")

source


# PromptingTools.aiextractMethod.
julia
aiextract(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;\n    return_type::Union{Type, AbstractTool, Vector},\n    verbose::Bool = true,\n    api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = (;\n        tool_choice = nothing),\n    strict::Union{Nothing, Bool} = nothing,\n    kwargs...)

Extract required information (defined by a struct return_type) from the provided prompt by leveraging OpenAI function calling mode.

This is a perfect solution for extracting structured information from text (eg, extract organization names in news articles, etc.).

It's effectively a light wrapper around the aigenerate call, which requires the additional keyword argument return_type and enforces that the model output adheres to it.

Arguments

Returns

If return_all=false (default):

If return_all=true:

Note: msg.content can be a single object (if a single tool is used) or a vector of objects (if multiple tools are used)!

See also: tool_call_signature, MaybeExtract, ItemsExtract, aigenerate, generate_struct

Example

Do you want to extract some specific measurements from a text like age, weight and height? You need to define the information you need as a struct (return_type):

"Person's age, height, and weight."\nstruct MyMeasurement\n    age::Int # required\n    height::Union{Int,Nothing} # optional\n    weight::Union{Nothing,Float64} # optional\nend\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall."; return_type=MyMeasurement)\n# PromptingTools.DataMessage(MyMeasurement)\nmsg.content\n# MyMeasurement(30, 180, 80.0)

The fields that allow Nothing are marked as optional in the schema:

msg = aiextract("James is 30."; return_type=MyMeasurement)\n# MyMeasurement(30, nothing, nothing)

If there are multiple items you want to extract, define a wrapper struct to get a Vector of MyMeasurement:

struct ManyMeasurements\n    measurements::Vector{MyMeasurement}\nend\n\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall. Then Jack is 19 but really tall - over 190!"; return_type=ManyMeasurements)\n\nmsg.content.measurements\n# 2-element Vector{MyMeasurement}:\n#  MyMeasurement(30, 180, 80.0)\n#  MyMeasurement(19, 190, nothing)

Or you can use the convenience wrapper ItemsExtract to extract multiple measurements (zero, one or more):

julia
using PromptingTools: ItemsExtract\n\nreturn_type = ItemsExtract{MyMeasurement}\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall. Then Jack is 19 but really tall - over 190!"; return_type)\n\nmsg.content.items # see the extracted items

Or if you want your extraction to fail gracefully when data isn't found, use MaybeExtract{T} wrapper (this trick is inspired by the Instructor package!):

using PromptingTools: MaybeExtract\n\nreturn_type = MaybeExtract{MyMeasurement}\n# Effectively the same as:\n# struct MaybeExtract{T}\n#     result::Union{T, Nothing} // The result of the extraction\n#     error::Bool // true if the extraction failed (no result found), false otherwise\n#     message::Union{Nothing, String} // Only present if no result is found, should be short and concise\n# end\n\n# If the LLM extraction fails, it will return a MaybeExtract with the `error` and `message` fields set instead of the result!\nmsg = aiextract("Extract measurements from the text: I am giraffe"; return_type)\nmsg.content\n# MaybeExtract{MyMeasurement}(nothing, true, "I'm sorry, but I can only assist with human measurements.")

That way, you can handle the error gracefully and get a reason why extraction failed (in msg.content.message).

Note that the error message refers to a giraffe not being a human, because in our MyMeasurement docstring, we said that it's for people!

Some non-OpenAI providers require a different specification of the "tool choice" than OpenAI. For example, to use Mistral models ("mistrall" for mistral large), do:

julia
"Some fruit"\nstruct Fruit\n    name::String\nend\naiextract("I ate an apple",return_type=Fruit,api_kwargs=(;tool_choice="any"),model="mistrall")\n# Notice two differences: 1) struct MUST have a docstring, 2) tool_choice is set explicitly set to "any"

Example of using a vector of field names with aiextract

julia
fields = [:location, :temperature => Float64, :condition => String]\nmsg = aiextract("Extract the following information from the text: location, temperature, condition. Text: The weather in New York is sunny and 72.5 degrees Fahrenheit."; return_type = fields)

Or simply call aiextract("some text"; return_type = [:reasoning,:answer]) to get Chain of Thought reasoning for the extraction task.

The result will be returned in a newly generated type, which you can check with PromptingTools.isextracted(msg.content) == true to confirm that the data has been extracted correctly.

This new syntax also allows you to provide field-level descriptions, which will be passed to the model.

julia
fields_with_descriptions = [\n    :location,\n    :temperature => Float64,\n    :temperature__description => "Temperature in degrees Fahrenheit",\n    :condition => String,\n    :condition__description => "Current weather condition (e.g., sunny, rainy, cloudy)"\n]\nmsg = aiextract("The weather in New York is sunny and 72.5 degrees Fahrenheit."; return_type = fields_with_descriptions)

If you feel that the extraction is not smart/creative enough, you can use json_mode = true to enforce JSON mode, which automatically enables structured output mode (as opposed to function calling mode).

The JSON mode is useful when you want to enforce a specific output format, such as JSON, and want the model to adhere to that format, but don't want to pretend it's a "function call". Expect a delay of a few seconds on the first call for a specific struct, as the provider has to produce the constrained grammar first.

julia
msg = aiextract("Extract the following information from the text: location, temperature, condition. Text: The weather in New York is sunny and 72.5 degrees Fahrenheit."; \nreturn_type = fields_with_descriptions, json_mode = true)\n# PromptingTools.DataMessage(NamedTuple)\n\nmsg.content\n# (location = "New York", temperature = 72.5, condition = "sunny")

It works equally well for structs provided as return types:

julia
msg = aiextract("James is 30, weighs 80kg. He's 180cm tall."; return_type=MyMeasurement, json_mode=true)

source


# PromptingTools.aiextractMethod.
julia
aiextract(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiextract call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:
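
Example

For instance, a minimal sketch mirroring the traced aigenerate example later on this page (MyMeasurement and the gpt4t alias are as defined elsewhere in this reference):

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())
msg = aiextract(wrap_schema, "James is 30, weighs 80kg. He's 180cm tall.";
    return_type = MyMeasurement, model = "gpt4t")
msg isa TracerMessage # true
msg.content # the extracted MyMeasurement, as with an untraced call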

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractAnthropicSchema, prompt::ALLOWED_PROMPT_TYPE; verbose::Bool = true,\n    api_key::String = ANTHROPIC_API_KEY, model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    streamcallback::Any = nothing,\n    no_system_message::Bool = false,\n    aiprefill::Union{Nothing, AbstractString} = nothing,\n    http_kwargs::NamedTuple = NamedTuple(), api_kwargs::NamedTuple = NamedTuple(),\n    cache::Union{Nothing, Symbol} = nothing,\n    kwargs...)

Generate an AI response based on a given prompt using the Anthropic API.

Arguments

Note: At the moment, the cache is only allowed for prompt segments over 1024 tokens (in some cases, over 2048 tokens). You'll get an error if you try to cache short prompts.

Returns

Use msg.content to access the extracted string.

See also: ai_str, aai_str

Example

Simple hello world to test the API:

julia
const PT = PromptingTools\nschema = PT.AnthropicSchema() # We need to be explicit if we want Anthropic, otherwise OpenAISchema is the default\n\nmsg = aigenerate(schema, "Say hi!"; model="claudeh") # claudeh is the model alias for Claude 3 Haiku, a fast and cheap model\n[ Info: Tokens: 21 @ Cost: $0.0 in 0.6 seconds\nAIMessage("Hello!")

msg is an AIMessage object. Access the generated string via content property:

julia
typeof(msg) # AIMessage{SubString{String}}\npropertynames(msg) # (:content, :status, :tokens, :elapsed, :cost, :log_prob, :finish_reason, :run_id, :sample_id, :_type)\nmsg.content # "Hello!

Note: We need to be explicit about the schema we want to use. If we don't, it will default to OpenAISchema (=PT.DEFAULT_SCHEMA). Alternatively, if you provide a known model name or alias (eg, claudeh for Claude 3 Haiku - see MODEL_REGISTRY), the schema will be inferred from the model name.

We will use the Claude 3 Haiku model for the following examples, so there is no need to specify the schema. See also "claudeo" and "claudes" for other Claude 3 models.

You can use string interpolation:

julia
const PT = PromptingTools\n\na = 1\nmsg=aigenerate("What is `$a+$a`?"; model="claudeh")\nmsg.content # "The answer to `1+1` is `2`."

___ You can provide the whole conversation or more intricate prompts as a Vector{AbstractMessage}. Claude models are good at completing conversations that ended with an AIMessage (they just continue where it left off):

julia
const PT = PromptingTools\n\nconversation = [\n    PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),\n    PT.UserMessage("I have feelings for my iPhone. What should I do?"),\n    PT.AIMessage("Hmm, strong the attachment is,")]\n\nmsg = aigenerate(conversation; model="claudeh")\nAIMessage("I sense. But unhealthy it may be. Your iPhone, a tool it is, not a living being. Feelings of affection, understandable they are, <continues>")

Example of streaming:

julia
# Simplest usage, just provide where to stream the text\nmsg = aigenerate("Count from 1 to 100."; streamcallback = stdout, model="claudeh")\n\nstreamcallback = PT.StreamCallback()\nmsg = aigenerate("Count from 1 to 100."; streamcallback, model="claudeh")\n# this allows you to inspect each chunk with `streamcallback.chunks`. You can then empty it with `empty!(streamcallback)` in between repeated calls.\n\n# Get verbose output with details of each chunk\nstreamcallback = PT.StreamCallback(; verbose=true, throw_on_error=true)\nmsg = aigenerate("Count from 1 to 10."; streamcallback, model="claudeh")

Note: Streaming support is only for Anthropic models and it doesn't yet support tool calling and a few other features (logprobs, refusals, etc.)

You can also provide a prefill for the AI response to steer the response in a certain direction (eg, formatting, style):

julia
msg = aigenerate("Sum up 1 to 100."; aiprefill = "I'd be happy to answer in one number without any additional text. The answer is:", model="claudeh")

Note: It MUST NOT end with a trailing space. You'll get an API error if you do.
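
You can also opt into Anthropic's prompt caching via the cache keyword (see the note under Arguments about the minimum prompt length). A minimal sketch, assuming :system is one of the accepted cache values (check the cache argument documentation for the full list):

julia
# Hypothetical long system prompt -- caching only applies above the ~1024-token minimum noted above
sys = "You are a meticulous assistant. " ^ 300
msg = aigenerate([PT.SystemMessage(sys), PT.UserMessage("Say hi!")];
    model = "claudeh", cache = :system)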

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractGoogleSchema, prompt::ALLOWED_PROMPT_TYPE;\n    verbose::Bool = true,\n    api_key::String = GOOGLE_API_KEY,\n    model::String = "gemini-pro", return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Generate an AI response based on a given prompt using the Google Gemini API. Get the API key here.

Note:

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aiembed, aiclassify, aiextract, aiscan, aitemplates

Example

Simple hello world to test the API:

julia
result = aigenerate("Say Hi!"; model="gemini-pro")\n# AIMessage("Hi there! 👋 I'm here to help you with any questions or tasks you may have. Just let me know what you need, and I'll do my best to assist you.")

result is an AIMessage object. Access the generated string via content property:

julia
typeof(result) # AIMessage{SubString{String}}\npropertynames(result) # (:content, :status, :tokens, :elapsed\nresult.content # "Hi there! ...

___ You can use string interpolation and alias "gemini":

julia
a = 1\nmsg=aigenerate("What is `$a+$a`?"; model="gemini")\nmsg.content # "1+1 is 2."

___ You can provide the whole conversation or more intricate prompts as a Vector{AbstractMessage}:

julia
const PT = PromptingTools\n\nconversation = [\n    PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),\n    PT.UserMessage("I have feelings for my iPhone. What should I do?")]\nmsg=aigenerate(conversation; model="gemini")\n# AIMessage("Young Padawan, you have stumbled into a dangerous path.... <continues>")

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractOllamaManagedSchema, prompt::ALLOWED_PROMPT_TYPE; verbose::Bool = true,\n    api_key::String = "", model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = NamedTuple(), api_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Generate an AI response based on a given prompt using the Ollama API.

Arguments

Returns

Use msg.content to access the extracted string.

See also: ai_str, aai_str, aiembed

Example

Simple hello world to test the API:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema() # We need to be explicit if we want Ollama, OpenAISchema is the default\n\nmsg = aigenerate(schema, "Say hi!"; model="openhermes2.5-mistral")\n# [ Info: Tokens: 69 in 0.9 seconds\n# AIMessage("Hello! How can I assist you today?")

msg is an AIMessage object. Access the generated string via content property:

julia
typeof(msg) # AIMessage{SubString{String}}\npropertynames(msg) # (:content, :status, :tokens, :elapsed\nmsg.content # "Hello! How can I assist you today?"

Note: We need to be explicit about the schema we want to use. If we don't, it will default to OpenAISchema (=PT.DEFAULT_SCHEMA). ___ You can use string interpolation:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\na = 1\nmsg=aigenerate(schema, "What is `$a+$a`?"; model="openhermes2.5-mistral")\nmsg.content # "The result of `1+1` is `2`."

___ You can provide the whole conversation or more intricate prompts as a Vector{AbstractMessage}:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\n\nconversation = [\n    PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),\n    PT.UserMessage("I have feelings for my iPhone. What should I do?")]\n\nmsg = aigenerate(schema, conversation; model="openhermes2.5-mistral")\n# [ Info: Tokens: 111 in 2.1 seconds\n# AIMessage("Strong the attachment is, it leads to suffering it may. Focus on the force within you must, ...<continues>")

Note: Managed Ollama currently supports at most 1 User Message and 1 System Message given the API limitations. If you want more, you need to use the ChatMLSchema.

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;\n    verbose::Bool = true,\n    api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT, return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    streamcallback::Any = nothing,\n    no_system_message::Bool = false,\n    name_user::Union{Nothing, String} = nothing,\n    name_assistant::Union{Nothing, String} = nothing,\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Generate an AI response based on a given prompt using the OpenAI API.

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aiembed, aiclassify, aiextract, aiscan, aitemplates

Example

Simple hello world to test the API:

julia
result = aigenerate("Say Hi!")\n# [ Info: Tokens: 29 @ Cost: $0.0 in 1.0 seconds\n# AIMessage("Hello! How can I assist you today?")

result is an AIMessage object. Access the generated string via content property:

julia
typeof(result) # AIMessage{SubString{String}}\npropertynames(result) # (:content, :status, :tokens, :elapsed\nresult.content # "Hello! How can I assist you today?"

___ You can use string interpolation:

julia
a = 1\nmsg=aigenerate("What is `$a+$a`?")\nmsg.content # "The sum of `1+1` is `2`."

___ You can provide the whole conversation or more intricate prompts as a Vector{AbstractMessage}:

julia
const PT = PromptingTools\n\nconversation = [\n    PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),\n    PT.UserMessage("I have feelings for my iPhone. What should I do?")]\nmsg=aigenerate(conversation)\n# AIMessage("Ah, strong feelings you have for your iPhone. A Jedi's path, this is not... <continues>")

Example of streaming:

julia
# Simplest usage, just provide where to stream the text\nmsg = aigenerate("Count from 1 to 100."; streamcallback = stdout)\n\nstreamcallback = PT.StreamCallback()\nmsg = aigenerate("Count from 1 to 100."; streamcallback)\n# this allows you to inspect each chunk with `streamcallback.chunks`. You can then empty it with `empty!(streamcallback)` in between repeated calls.\n\n# Get verbose output with details of each chunk\nstreamcallback = PT.StreamCallback(; verbose=true, throw_on_error=true)\nmsg = aigenerate("Count from 1 to 10."; streamcallback)

Learn more in ?StreamCallback. Note: Streaming support is only for OpenAI models and it doesn't yet support tool calling and a few other features (logprobs, refusals, etc.)

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", return_all::Bool = false, kwargs...)

Wraps the normal aigenerate call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

Example

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())\nmsg = aigenerate(wrap_schema, "Say hi!"; model = "gpt4t")\nmsg isa TracerMessage # true\nmsg.content # access content like if it was the message\nPT.pprint(msg) # pretty-print the message

It works on a vector of messages and converts only the non-tracer ones, eg,

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())\nconv = aigenerate(wrap_schema, "Say hi!"; model = "gpt4t", return_all = true)\nall(PT.istracermessage, conv) #true

source


# PromptingTools.aiimageMethod.
julia
aiimage(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;\n    image_size::AbstractString = "1024x1024",\n    image_quality::AbstractString = "standard",\n    image_n::Integer = 1,\n    verbose::Bool = true,\n    api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_IMAGE_GENERATION,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Generates an image from the provided prompt. If multiple "messages" are provided in prompt, it extracts the text ONLY from the last message!

Image (or the reference to it) will be returned in a DataMessage.content, the format will depend on the api_kwargs.response_format you set.

Can be used for generating images of varying quality and style with dall-e-* models. This function DOES NOT SUPPORT multi-turn conversations (ie, do not provide previous conversation via conversation argument).

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aigenerate, aiembed, aiclassify, aiextract, aiscan, aitemplates

Notes

Example

Generate an image:

julia
# You can experiment with `image_size`, `image_quality` kwargs!\nmsg = aiimage("A white cat on a car")\n\n# Download the image into a file\nusing Downloads\nDownloads.download(msg.content[:url], "cat_on_car.png")\n\n# You can also see the revised prompt that DALL-E 3 used\nmsg.content[:revised_prompt]\n# Output: "Visualize a pristine white cat gracefully perched atop a shiny car. \n# The cat's fur is stark white and its eyes bright with curiosity. \n# As for the car, it could be a contemporary sedan, glossy and in a vibrant color. \n# The scene could be set under the blue sky, enhancing the contrast between the white cat, the colorful car, and the bright blue sky."

Note that you MUST download any URL-based images within 60 minutes. The links will become inactive.

If you want to download the image directly into the DataMessage, provide response_format="b64_json" in api_kwargs:

julia
msg = aiimage("A white cat on a car"; image_quality="hd", api_kwargs=(; response_format="b64_json"))\n\n# Then you need to use Base64 package to decode it and save it to a file:\nusing Base64\nwrite("cat_on_car_hd.png", base64decode(msg.content[:b64_json]));

source


# PromptingTools.aiimageMethod.
julia
aiimage(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiimage call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:
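
Example

For instance, a minimal sketch mirroring the traced aigenerate example earlier on this page:

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())
msg = aiimage(wrap_schema, "A white cat on a car")
msg isa TracerMessage # true
msg.content[:url] # access the result as with an untraced call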

source


# PromptingTools.aiscanMethod.
julia
aiscan([prompt_schema::AbstractOllamaSchema,] prompt::ALLOWED_PROMPT_TYPE; \nimage_url::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,\nimage_path::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,\nattach_to_latest::Bool = true,\nverbose::Bool = true, api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = (;\n        retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), \n    api_kwargs::NamedTuple = (; max_tokens = 2500),\n    kwargs...)

Scans the provided image (image_url or image_path) with the goal provided in the prompt.

Can be used for many multi-modal tasks, such as: OCR (transcribe text in the image), image captioning, image classification, etc.

It's effectively a light wrapper around the aigenerate call, which accepts the additional keyword arguments image_url, image_path, and image_detail. At least one image source (url or path) must be provided.

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aigenerate, aiembed, aiclassify, aiextract, aitemplates

Notes

Example

Describe the provided image:

julia
msg = aiscan("Describe the image"; image_path="julia.png", model="bakllava")\n# [ Info: Tokens: 1141 @ Cost: $0.0117 in 2.2 seconds\n# AIMessage("The image shows a logo consisting of the word "julia" written in lowercase")

You can provide multiple images at once as a vector and ask for "low" level of detail (cheaper):

julia
msg = aiscan("Describe the image"; image_path=["julia.png","python.png"], model="bakllava")

You can use this function as a nice and quick OCR (transcribe text in the image) with a template :OCRTask. Let's transcribe some SQL code from a screenshot (no more re-typing!):

julia
using Downloads\n# Screenshot of some SQL code -- we cannot use image_url directly, so we need to download it first\nimage_url = "https://www.sqlservercentral.com/wp-content/uploads/legacy/8755f69180b7ac7ee76a69ae68ec36872a116ad4/24622.png"\nimage_path = Downloads.download(image_url)\nmsg = aiscan(:OCRTask; image_path, model="bakllava", task="Transcribe the SQL code in the image.", api_kwargs=(; max_tokens=2500))\n\n# AIMessage("```sql\n# update Orders <continue>\n\n# You can add syntax highlighting of the outputs via Markdown\nusing Markdown\nmsg.content |> Markdown.parse

Local models cannot handle image URLs directly (image_url), so you need to download the image first and provide it as image_path:

julia
using Downloads\nimage_path = Downloads.download(image_url)

Notice that we set max_tokens = 2500. If your outputs seem truncated, it might be because the default maximum tokens on the server is set too low!

source


# PromptingTools.aiscanMethod.
julia
aiscan([prompt_schema::AbstractOpenAISchema,] prompt::ALLOWED_PROMPT_TYPE; \nimage_url::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,\nimage_path::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,\nimage_detail::AbstractString = "auto",\nattach_to_latest::Bool = true,\nverbose::Bool = true, api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = (;\n        retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), \n    api_kwargs::NamedTuple = (; max_tokens = 2500),\n    kwargs...)

Scans the provided image (image_url or image_path) with the goal provided in the prompt.

Can be used for many multi-modal tasks, such as: OCR (transcribe text in the image), image captioning, image classification, etc.

It's effectively a light wrapper around the aigenerate call, which accepts the additional keyword arguments image_url, image_path, and image_detail. At least one image source (url or path) must be provided.

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aigenerate, aiembed, aiclassify, aiextract, aitemplates

Notes

Example

Describe the provided image:

julia
msg = aiscan("Describe the image"; image_path="julia.png", model="gpt4v")\n# [ Info: Tokens: 1141 @ Cost: $0.0117 in 2.2 seconds\n# AIMessage("The image shows a logo consisting of the word "julia" written in lowercase")

You can provide multiple images at once as a vector and ask for "low" level of detail (cheaper):

julia
msg = aiscan("Describe the image"; image_path=["julia.png","python.png"], image_detail="low", model="gpt4v")

You can use this function as a nice and quick OCR (transcribe text in the image) with a template :OCRTask. Let's transcribe some SQL code from a screenshot (no more re-typing!):

julia
# Screenshot of some SQL code\nimage_url = "https://www.sqlservercentral.com/wp-content/uploads/legacy/8755f69180b7ac7ee76a69ae68ec36872a116ad4/24622.png"\nmsg = aiscan(:OCRTask; image_url, model="gpt4v", task="Transcribe the SQL code in the image.", api_kwargs=(; max_tokens=2500))\n\n# [ Info: Tokens: 362 @ Cost: $0.0045 in 2.5 seconds\n# AIMessage("```sql\n# update Orders <continue>\n\n# You can add syntax highlighting of the outputs via Markdown\nusing Markdown\nmsg.content |> Markdown.parse

Notice that we enforce max_tokens = 2500. That's because OpenAI seems to default to ~300 tokens, which provides incomplete outputs. Hence, we set this value to 2500 as a default. If you still get truncated outputs, increase this value.

source


# PromptingTools.aiscanMethod.
julia
aiscan(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiscan call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:
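
Example

For instance, a minimal sketch (image_path and the gpt4v alias as in the aiscan examples above):

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())
msg = aiscan(wrap_schema, "Describe the image"; image_path = "julia.png", model = "gpt4v")
msg isa TracerMessage # true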

source


# PromptingTools.aitemplatesFunction.
julia
aitemplates

Easily find the most suitable templates for your use case.

You can search by:

Keyword Arguments

Examples

Find available templates with aitemplates:

julia
tmps = aitemplates("JuliaExpertAsk")\n# Will surface one specific template\n# 1-element Vector{AITemplateMetadata}:\n# PromptingTools.AITemplateMetadata\n#   name: Symbol JuliaExpertAsk\n#   description: String "For asking questions about Julia language. Placeholders: `ask`"\n#   version: String "1"\n#   wordcount: Int64 237\n#   variables: Array{Symbol}((1,))\n#   system_preview: String "You are a world-class Julia language programmer with the knowledge of the latest syntax. Your commun"\n#   user_preview: String "# Question\n\n{{ask}}"\n#   source: String ""

The above gives you a good idea of what the template is about, what placeholders are available, and how much it would cost to use it (=wordcount).

Search for all Julia-related templates:

julia
tmps = aitemplates("Julia")\n# 2-element Vector{AITemplateMetadata}... -> more to come later!

If you are on VSCode, you can leverage nice tabular display with vscodedisplay:

julia
using DataFrames\ntmps = aitemplates("Julia") |> DataFrame |> vscodedisplay

I have my selected template, how do I use it? Just use the "name" in aigenerate or aiclassify like you see in the first example!

source


# PromptingTools.aitemplatesMethod.

Find the top-limit templates whose name or description fields partially match the query_key::String in TEMPLATE_METADATA.

source


# PromptingTools.aitemplatesMethod.

Find the top-limit templates where the provided query_key::Regex matches the name, description, or the previews of the System/User messages in TEMPLATE_METADATA.
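
For instance, a quick case-insensitive search across all those fields:

julia
tmps = aitemplates(r"extract"i)
# Vector{AITemplateMetadata} with every template mentioning "extract" anywhere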

source


# PromptingTools.aitemplatesMethod.

Find the top-limit templates whose name::Symbol exactly matches the query_name::Symbol in TEMPLATE_METADATA.
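
For instance:

julia
tmps = aitemplates(:JuliaExpertAsk)
# 1-element Vector{AITemplateMetadata} -- an exact name match only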

source


# PromptingTools.aitoolsMethod.
julia
aitools(prompt_schema::AbstractAnthropicSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tools::Union{Type, Function, Method, AbstractTool, Vector} = Tool[],\n    verbose::Bool = true,\n    api_key::String = ANTHROPIC_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    cache::Union{Nothing, Symbol} = nothing,\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = (;\n        tool_choice = nothing),\n    kwargs...)

Calls chat completion API with an optional tool call signature. It can receive both tools and standard string-based content. Ideal for agentic workflows with more complex cognitive architectures.

Difference to aigenerate: Response can be a tool call (structured)

Differences to aiextract: Can provide infinitely many tools (including Functions!) and then respond with the tool call's output.

Arguments

Example

julia
## Let's define a tool\nget_weather(location, date) = "The weather in $location on $date is 70 degrees."\n\nmsg = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather, model = "claudeh")\nPT.execute_tool(get_weather, msg.tool_calls[1].args)\n# "The weather in Tokyo on 2023-05-03 is 70 degrees."\n\n# Ignores the tool\nmsg = aitools("What's your name?";\n    tools = get_weather, model = "claudeh")\n# I don't have a personal name, but you can call me your AI assistant!

How to have a multi-turn conversation with tools:

julia
conv = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather, return_all = true, model = "claudeh")\n\ntool_msg = conv[end].tool_calls[1] # there can be multiple tool calls requested!!\n\n# Execute the output to the tool message content\ntool_msg.content = PT.execute_tool(get_weather, tool_msg.args)\n\n# Add the tool message to the conversation\npush!(conv, tool_msg)\n\n# Call LLM again with the updated conversation\nconv = aitools(\n    "And in New York?"; tools = get_weather, return_all = true, conversation = conv, model = "claudeh")\n# 6-element Vector{AbstractMessage}:\n# SystemMessage("Act as a helpful AI assistant")\n# UserMessage("What's the weather in Tokyo on May 3rd, 2023?")\n# AIToolRequest("-"; Tool Requests: 1)\n# ToolMessage("The weather in Tokyo on 2023-05-03 is 70 degrees.")\n# UserMessage("And in New York?")\n# AIToolRequest("-"; Tool Requests: 1)

source


# PromptingTools.aitoolsMethod.
julia
aitools(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;\n    tools::Union{Type, Function, Method, AbstractTool, Vector} = Tool[],\n    verbose::Bool = true,\n    api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = (;\n        tool_choice = nothing),\n    strict::Union{Nothing, Bool} = nothing,\n    json_mode::Union{Nothing, Bool} = nothing,\n    name_user::Union{Nothing, String} = nothing,\n    name_assistant::Union{Nothing, String} = nothing,\n    kwargs...)

Calls chat completion API with an optional tool call signature. It can receive both tools and standard string-based content. Ideal for agentic workflows with more complex cognitive architectures.

Difference to aigenerate: Response can be a tool call (structured)

Differences to aiextract: Can provide infinitely many tools (including Functions!) and then respond with the tool call's output.

Arguments

Example

julia
## Let's define a tool\nget_weather(location, date) = "The weather in $location on $date is 70 degrees."\n\n## JSON mode request\nmsg = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather,\n    json_mode = true)\nPT.execute_tool(get_weather, msg.tool_calls[1].args)\n# "The weather in Tokyo on 2023-05-03 is 70 degrees."\n\n# Function calling request\nmsg = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather)\nPT.execute_tool(get_weather, msg.tool_calls[1].args)\n# "The weather in Tokyo on 2023-05-03 is 70 degrees."\n\n# Ignores the tool\nmsg = aitools("What's your name?";\n    tools = get_weather)\n# I don't have a personal name, but you can call me your AI assistant!

How to have a multi-turn conversation with tools:

julia
conv = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather, return_all = true)\n\ntool_msg = conv[end].tool_calls[1] # there can be multiple tool calls requested!!\n\n# Execute the output to the tool message content\ntool_msg.content = PT.execute_tool(get_weather, tool_msg.args)\n\n# Add the tool message to the conversation\npush!(conv, tool_msg)\n\n# Call LLM again with the updated conversation\nconv = aitools(\n    "And in New York?"; tools = get_weather, return_all = true, conversation = conv)\n# 6-element Vector{AbstractMessage}:\n# SystemMessage("Act as a helpful AI assistant")\n# UserMessage("What's the weather in Tokyo on May 3rd, 2023?")\n# AIToolRequest("-"; Tool Requests: 1)\n# ToolMessage("The weather in Tokyo on 2023-05-03 is 70 degrees.")\n# UserMessage("And in New York?")\n# AIToolRequest("-"; Tool Requests: 1)

source


# PromptingTools.aitoolsMethod.
julia
aitools(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aitools call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:
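
Example

For instance, a minimal sketch reusing the get_weather tool from the aitools examples above:

julia
get_weather(location, date) = "The weather in $location on $date is 70 degrees."
wrap_schema = PT.TracerSchema(PT.OpenAISchema())
msg = aitools(wrap_schema, "What's the weather in Tokyo on May 3rd, 2023?"; tools = get_weather)
msg isa TracerMessage # true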

source


# PromptingTools.align_tracer!Method.

Aligns multiple tracers in the vector to have the same Parent and Thread IDs as the first item.
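
A minimal sketch of typical usage (building on the TracerSchema examples on this page):

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())
conv = aigenerate(wrap_schema, "Say hi!"; model = "gpt4t", return_all = true)
PT.align_tracer!(conv) # all tracer messages now share the first item's parent_id and thread_id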

source


# PromptingTools.align_tracer!Method.

Aligns the tracer message, updating the parent_id, thread_id. Often used to align multiple tracers in the vector to have the same IDs.

source


# PromptingTools.anthropic_apiFunction.
julia
anthropic_api(\n    prompt_schema::AbstractAnthropicSchema,\n    messages::Vector{<:AbstractDict{String, <:Any}} = Vector{Dict{String, Any}}();\n    api_key::AbstractString = ANTHROPIC_API_KEY,\n    system::Union{Nothing, AbstractString, AbstractVector{<:AbstractDict}} = nothing,\n    endpoint::String = "messages",\n    max_tokens::Int = 2048,\n    model::String = "claude-3-haiku-20240307", http_kwargs::NamedTuple = NamedTuple(),\n    stream::Bool = false,\n    url::String = "https://api.anthropic.com/v1",\n    cache::Union{Nothing, Symbol} = nothing,\n    kwargs...)

A simple wrapper for a call to the Anthropic API.

Keyword Arguments
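
Example

For illustration, a minimal direct call might look like the sketch below (the exact shape of the returned object is not documented here, so inspect it interactively; the high-level aigenerate is usually the better entry point):

julia
const PT = PromptingTools
messages = [Dict("role" => "user", "content" => "Say hi!")]
resp = PT.anthropic_api(PT.AnthropicSchema(), messages;
    api_key = ENV["ANTHROPIC_API_KEY"], model = "claude-3-haiku-20240307", max_tokens = 256)
# `resp` holds the provider's raw response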

source


# PromptingTools.anthropic_extra_headersMethod.
julia
anthropic_extra_headers

Adds API version and beta headers to the request.

Kwargs / Beta headers

source


# PromptingTools.auth_headerMethod.
julia
auth_header(api_key::Union{Nothing, AbstractString};\n    bearer::Bool = true,\n    x_api_key::Bool = false,\n    extra_headers::AbstractVector = Vector{\n        Pair{String, String},\n    }[],\n    kwargs...)

Creates the authentication headers for any API request. Assumes that the communication is done in JSON format.

Arguments
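
Example

For example, a rough sketch of the expected output (the exact header set shown here is an assumption based on the signature above):

julia
PT.auth_header("<my-api-key>")
# Roughly:
# "Authorization" => "Bearer <my-api-key>"
# "Content-Type" => "application/json"
# "Accept" => "application/json"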

source


# PromptingTools.build_response_bodyMethod.
julia
build_response_body(\n    flavor::AnthropicStream, cb::AbstractStreamCallback; verbose::Bool = false, kwargs...)

Build the response body from the chunks to mimic receiving a standard response from the API.

Note: Limited functionality for now. Does NOT support tool use. Use standard responses for these.

source


# PromptingTools.build_response_bodyMethod.
julia
build_response_body(flavor::OpenAIStream, cb::AbstractStreamCallback; verbose::Bool = false, kwargs...)

Build the response body from the chunks to mimic receiving a standard response from the API.

Note: Limited functionality for now. Does NOT support tool use, refusals, logprobs. Use standard responses for these.

source


# PromptingTools.build_template_metadataFunction.
julia
build_template_metadata(\n    template::AbstractVector{<:AbstractMessage}, template_name::Symbol,\n    metadata_msgs::AbstractVector{<:MetadataMessage} = MetadataMessage[]; max_length::Int = 100)

Builds AITemplateMetadata for a given template based on the messages in template and other information.

AITemplateMetadata is a helper struct for easy searching and reviewing of templates via aitemplates().

Note: Assumes that there is only ever one UserMessage and SystemMessage (concatenates them together)

source


# PromptingTools.call_costMethod.
julia
call_cost(prompt_tokens::Int, completion_tokens::Int, model::String;\n    cost_of_token_prompt::Number = get(MODEL_REGISTRY,\n        model,\n        (; cost_of_token_prompt = 0.0)).cost_of_token_prompt,\n    cost_of_token_generation::Number = get(MODEL_REGISTRY, model,\n        (; cost_of_token_generation = 0.0)).cost_of_token_generation)\n\ncall_cost(msg, model::String)

Calculate the cost of a call based on the number of tokens in the message and the cost per token.

Arguments

Returns

Examples

julia
# Assuming MODEL_REGISTRY is set up with appropriate costs\nMODEL_REGISTRY = Dict(\n    "model1" => (cost_of_token_prompt = 0.05, cost_of_token_generation = 0.10),\n    "model2" => (cost_of_token_prompt = 0.07, cost_of_token_generation = 0.02)\n)\n\ncost1 = call_cost(10, 20, "model1")\n\n# from message\nmsg1 = AIMessage(;tokens=[10, 20])  # 10 prompt tokens, 20 generation tokens\ncost1 = call_cost(msg1, "model1")\n# cost1 = 10 * 0.05 + 20 * 0.10 = 2.5\n\n# Using custom token costs\ncost2 = call_cost(10, 20, "model3"; cost_of_token_prompt = 0.08, cost_of_token_generation = 0.12)\n# cost2 = 10 * 0.08 + 20 * 0.12 = 3.2

source


# PromptingTools.call_cost_alternativeMethod.

call_cost_alternative()

Alternative cost calculation. Used to calculate cost of image generation with DALL-E 3 and similar.

source


# PromptingTools.callbackMethod.
julia
callback(cb::AbstractStreamCallback, chunk::StreamChunk; kwargs...)

Process the chunk to be printed and print it. It's a wrapper for two operations:

source


# PromptingTools.configure_callback!Method.
julia
configure_callback!(cb::StreamCallback, schema::AbstractPromptSchema;\n    api_kwargs...)

Configures the callback cb for streaming with a given prompt schema.

If no cb.flavor is provided, adjusts the flavor and the provided api_kwargs as necessary. Eg, for most schemas, we add kwargs like stream = true to the api_kwargs.

If cb.flavor is provided, both callback and api_kwargs are left unchanged! You need to configure them yourself!
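
A minimal sketch, assuming (as the description implies) that it returns the adjusted callback together with the api_kwargs to forward to the ai* call:

julia
cb = PT.StreamCallback()
cb, api_kwargs = PT.configure_callback!(cb, PT.OpenAISchema())
# api_kwargs now carries the streaming flags (eg, `stream = true`) to pass on to the request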

source


# PromptingTools.aiembedFunction.
julia
aiembed(tracer_schema::AbstractTracerSchema,\n    doc_or_docs::Union{AbstractString, AbstractVector{<:AbstractString}}, postprocess::Function = identity;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiembed call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:
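
Example

For instance, a minimal sketch:

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())
msg = aiembed(wrap_schema, "Hello World")
msg.content # the embedding vector, as with an untraced call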

source


# PromptingTools.aiembedMethod.
julia
aiembed(prompt_schema::AbstractOllamaManagedSchema,\n        doc_or_docs::Union{AbstractString, AbstractVector{<:AbstractString}},\n        postprocess::F = identity;\n        verbose::Bool = true,\n        api_key::String = "",\n        model::String = MODEL_EMBEDDING,\n        http_kwargs::NamedTuple = (retry_non_idempotent = true,\n                                   retries = 5,\n                                   readtimeout = 120),\n        api_kwargs::NamedTuple = NamedTuple(),\n        kwargs...) where {F <: Function}

The aiembed function generates embeddings for the given input using a specified model and returns a message object containing the embeddings, status, token count, and elapsed time.

Arguments

Returns

Note: Ollama API currently does not return the token count, so it's set to (0,0)

Example

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\n\nmsg = aiembed(schema, "Hello World"; model="openhermes2.5-mistral")\nmsg.content # 4096-element JSON3.Array{Float64...

We can embed multiple strings at once and they will be horizontally concatenated (hcat) into a matrix (ie, each column corresponds to one string)

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\n\nmsg = aiembed(schema, ["Hello World", "How are you?"]; model="openhermes2.5-mistral")\nmsg.content # 4096×2 Matrix{Float64}:

If you plan to calculate the cosine distance between embeddings, you can normalize them first:

julia
const PT = PromptingTools\nusing LinearAlgebra\nschema = PT.OllamaManagedSchema()\n\nmsg = aiembed(schema, ["embed me", "and me too"], LinearAlgebra.normalize; model="openhermes2.5-mistral")\n\n# calculate cosine distance between the two normalized embeddings as a simple dot product\nmsg.content' * msg.content[:, 1] # [1.0, 0.34]

Similarly, you can use the postprocess argument to materialize the data from JSON3.Object by using postprocess = copy

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\n\nmsg = aiembed(schema, "Hello World", copy; model="openhermes2.5-mistral")\nmsg.content # 4096-element Vector{Float64}

source


# PromptingTools.aiembedMethod.
julia
aiembed(prompt_schema::AbstractOpenAISchema,\n        doc_or_docs::Union{AbstractString, AbstractVector{<:AbstractString}},\n        postprocess::F = identity;\n        verbose::Bool = true,\n        api_key::String = OPENAI_API_KEY,\n        model::String = MODEL_EMBEDDING, \n        http_kwargs::NamedTuple = (retry_non_idempotent = true,\n                                   retries = 5,\n                                   readtimeout = 120),\n        api_kwargs::NamedTuple = NamedTuple(),\n        kwargs...) where {F <: Function}

The aiembed function generates embeddings for the given input using a specified model and returns a message object containing the embeddings, status, token count, and elapsed time.

Arguments

Returns

Example

julia
msg = aiembed("Hello World")\nmsg.content # 1536-element JSON3.Array{Float64...

We can embed multiple strings at once and they will be horizontally concatenated (hcat) into a matrix (ie, each column corresponds to one string)

julia
msg = aiembed(["Hello World", "How are you?"])\nmsg.content # 1536×2 Matrix{Float64}:

If you plan to calculate the cosine distance between embeddings, you can normalize them first:

julia
using LinearAlgebra\nmsg = aiembed(["embed me", "and me too"], LinearAlgebra.normalize)\n\n# calculate cosine distance between the two normalized embeddings as a simple dot product\nmsg.content' * msg.content[:, 1] # [1.0, 0.787]

source


# PromptingTools.aiextractMethod.
julia
aiextract(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;\n    return_type::Union{Type, AbstractTool, Vector},\n    verbose::Bool = true,\n    api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = (;\n        tool_choice = nothing),\n    strict::Union{Nothing, Bool} = nothing,\n    kwargs...)

Extract required information (defined by a struct return_type) from the provided prompt by leveraging OpenAI function calling mode.

This is a perfect solution for extracting structured information from text (eg, extract organization names in news articles, etc.).

It's effectively a light wrapper around the aigenerate call, which requires the additional keyword argument return_type and enforces that the model output adheres to it.

Arguments

Returns

If return_all=false (default):

If return_all=true:

Note: msg.content can be a single object (if a single tool is used) or a vector of objects (if multiple tools are used)!

See also: tool_call_signature, MaybeExtract, ItemsExtract, aigenerate, generate_struct

Example

Do you want to extract some specific measurements from a text like age, weight and height? You need to define the information you need as a struct (return_type):

"Person's age, height, and weight."\nstruct MyMeasurement\n    age::Int # required\n    height::Union{Int,Nothing} # optional\n    weight::Union{Nothing,Float64} # optional\nend\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall."; return_type=MyMeasurement)\n# PromptingTools.DataMessage(MyMeasurement)\nmsg.content\n# MyMeasurement(30, 180, 80.0)

The fields that allow Nothing are marked as optional in the schema:

msg = aiextract("James is 30."; return_type=MyMeasurement)\n# MyMeasurement(30, nothing, nothing)

If there are multiple items you want to extract, define a wrapper struct to get a Vector of MyMeasurement:

struct ManyMeasurements\n    measurements::Vector{MyMeasurement}\nend\n\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall. Then Jack is 19 but really tall - over 190!"; return_type=ManyMeasurements)\n\nmsg.content.measurements\n# 2-element Vector{MyMeasurement}:\n#  MyMeasurement(30, 180, 80.0)\n#  MyMeasurement(19, 190, nothing)

Or you can use the convenience wrapper ItemsExtract to extract multiple measurements (zero, one or more):

julia
using PromptingTools: ItemsExtract\n\nreturn_type = ItemsExtract{MyMeasurement}\nmsg = aiextract("James is 30, weighs 80kg. He's 180cm tall. Then Jack is 19 but really tall - over 190!"; return_type)\n\nmsg.content.items # see the extracted items

Or if you want your extraction to fail gracefully when data isn't found, use MaybeExtract{T} wrapper (this trick is inspired by the Instructor package!):

using PromptingTools: MaybeExtract\n\nreturn_type = MaybeExtract{MyMeasurement}\n# Effectively the same as:\n# struct MaybeExtract{T}\n#     result::Union{T, Nothing} // The result of the extraction\n#     error::Bool // true if no result was found (the extraction failed), false otherwise\n#     message::Union{Nothing, String} // Only present if no result is found, should be short and concise\n# end\n\n# If LLM extraction fails, it will return the object with `error = true` and a short `message` instead of the result!\nmsg = aiextract("Extract measurements from the text: I am giraffe"; return_type)\nmsg.content\n# MaybeExtract{MyMeasurement}(nothing, true, "I'm sorry, but I can only assist with human measurements.")

That way, you can handle the error gracefully and get a reason why extraction failed (in msg.content.message).

Note that the error message refers to a giraffe not being a human, because in our MyMeasurement docstring, we said that it's for people!

Some non-OpenAI providers require a different specification of the "tool choice" than OpenAI. For example, to use Mistral models ("mistrall" for mistral large), do:

julia
"Some fruit"\nstruct Fruit\n    name::String\nend\naiextract("I ate an apple",return_type=Fruit,api_kwargs=(;tool_choice="any"),model="mistrall")\n# Notice two differences: 1) struct MUST have a docstring, 2) tool_choice is set explicitly set to "any"

Example of using a vector of field names with aiextract

julia
fields = [:location, :temperature => Float64, :condition => String]\nmsg = aiextract("Extract the following information from the text: location, temperature, condition. Text: The weather in New York is sunny and 72.5 degrees Fahrenheit."; return_type = fields)

Or simply call aiextract("some text"; return_type = [:reasoning,:answer]) to get a Chain of Thought reasoning for extraction task.

It will be returned as a new auto-generated type, which you can check with PromptingTools.isextracted(msg.content) == true to confirm the data has been extracted correctly.

This new syntax also allows you to provide field-level descriptions, which will be passed to the model.

julia
fields_with_descriptions = [\n    :location,\n    :temperature => Float64,\n    :temperature__description => "Temperature in degrees Fahrenheit",\n    :condition => String,\n    :condition__description => "Current weather condition (e.g., sunny, rainy, cloudy)"\n]\nmsg = aiextract("The weather in New York is sunny and 72.5 degrees Fahrenheit."; return_type = fields_with_descriptions)

If you feel that the extraction is not smart/creative enough, you can use json_mode = true to enforce the JSON mode, which automatically enables the structured output mode (as opposed to function calling mode).

The JSON mode is useful when you want to enforce a specific output format, such as JSON, and want the model to adhere to that format, but don't want to pretend it's a "function call". Expect a delay of a few seconds on the first call for a specific struct, as the provider has to produce the constrained grammar first.

julia
msg = aiextract("Extract the following information from the text: location, temperature, condition. Text: The weather in New York is sunny and 72.5 degrees Fahrenheit."; \nreturn_type = fields_with_descriptions, json_mode = true)\n# PromptingTools.DataMessage(NamedTuple)\n\nmsg.content\n# (location = "New York", temperature = 72.5, condition = "sunny")

It works equally well for structs provided as return types:

julia
msg = aiextract("James is 30, weighs 80kg. He's 180cm tall."; return_type=MyMeasurement, json_mode=true)

source


# PromptingTools.aiextractMethod.
julia
aiextract(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiextract call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractAnthropicSchema, prompt::ALLOWED_PROMPT_TYPE; verbose::Bool = true,\n    api_key::String = ANTHROPIC_API_KEY, model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    streamcallback::Any = nothing,\n    no_system_message::Bool = false,\n    aiprefill::Union{Nothing, AbstractString} = nothing,\n    http_kwargs::NamedTuple = NamedTuple(), api_kwargs::NamedTuple = NamedTuple(),\n    cache::Union{Nothing, Symbol} = nothing,\n    kwargs...)

Generate an AI response based on a given prompt using the Anthropic API.

Arguments

Note: At the moment, the cache is only allowed for prompt segments over 1024 tokens (in some cases, over 2048 tokens). You'll get an error if you try to cache short prompts.

Returns

Use msg.content to access the extracted string.

See also: ai_str, aai_str

Example

Simple hello world to test the API:

julia
const PT = PromptingTools\nschema = PT.AnthropicSchema() # We need to explicit if we want Anthropic, otherwise OpenAISchema is the default\n\nmsg = aigenerate(schema, "Say hi!"; model="claudeh") #claudeh is the model alias for Claude 3 Haiku, fast and cheap model\n[ Info: Tokens: 21 @ Cost: $0.0 in 0.6 seconds\nAIMessage("Hello!")

msg is an AIMessage object. Access the generated string via content property:

julia
typeof(msg) # AIMessage{SubString{String}}\npropertynames(msg) # (:content, :status, :tokens, :elapsed, :cost, :log_prob, :finish_reason, :run_id, :sample_id, :_type)\nmsg.content # "Hello!

Note: We need to be explicit about the schema we want to use. If we don't, it will default to OpenAISchema (=PT.DEFAULT_SCHEMA) Alternatively, if you provide a known model name or alias (eg, claudeh for Claude 3 Haiku - see MODEL_REGISTRY), the schema will be inferred from the model name.

We will use the Claude 3 Haiku model for the following examples, so there is no need to specify the schema. See also "claudeo" and "claudes" for other Claude 3 models.

You can use string interpolation:

julia
const PT = PromptingTools\n\na = 1\nmsg=aigenerate("What is `$a+$a`?"; model="claudeh")\nmsg.content # "The answer to `1+1` is `2`."

___ You can provide the whole conversation or more intricate prompts as a Vector{AbstractMessage}. Claude models are good at completing conversations that ended with an AIMessage (they just continue where it left off):

julia
const PT = PromptingTools\n\nconversation = [\n    PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),\n    PT.UserMessage("I have feelings for my iPhone. What should I do?"),\n    PT.AIMessage("Hmm, strong the attachment is,")]\n\nmsg = aigenerate(conversation; model="claudeh")\nAIMessage("I sense. But unhealthy it may be. Your iPhone, a tool it is, not a living being. Feelings of affection, understandable they are, <continues>")

Example of streaming:

julia
# Simplest usage, just provide where to stream the text\nmsg = aigenerate("Count from 1 to 100."; streamcallback = stdout, model="claudeh")\n\nstreamcallback = PT.StreamCallback()\nmsg = aigenerate("Count from 1 to 100."; streamcallback, model="claudeh")\n# this allows you to inspect each chunk with `streamcallback.chunks`. You can then empty it with `empty!(streamcallback)` in between repeated calls.\n\n# Get verbose output with details of each chunk\nstreamcallback = PT.StreamCallback(; verbose=true, throw_on_error=true)\nmsg = aigenerate("Count from 1 to 10."; streamcallback, model="claudeh")

Note: Streaming support is only for Anthropic models and it doesn't yet support tool calling and a few other features (logprobs, refusals, etc.)

You can also provide a prefill for the AI response to steer the response in a certain direction (eg, formatting, style):

julia
msg = aigenerate("Sum up 1 to 100."; aiprefill = "I'd be happy to answer in one number without any additional text. The answer is:", model="claudeh")

Note: The prefill text MUST NOT end with a trailing space. You'll get an API error if you do.
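
You can also opt longer prompts into Anthropic's prompt caching via the cache keyword from the signature above (a sketch; it assumes :system is among the accepted cache values and that the cached segment exceeds the token minimum noted earlier):

julia
# Hypothetical long system prompt (must exceed the ~1024-token minimum to be cacheable)
long_system = "You are a meticulous assistant. " ^ 200
conversation = [PT.SystemMessage(long_system), PT.UserMessage("Say hi!")]
msg = aigenerate(conversation; model = "claudeh", cache = :system)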

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractGoogleSchema, prompt::ALLOWED_PROMPT_TYPE;\n    verbose::Bool = true,\n    api_key::String = GOOGLE_API_KEY,\n    model::String = "gemini-pro", return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Generate an AI response based on a given prompt using the Google Gemini API. Get the API key here.

Note:

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aiembed, aiclassify, aiextract, aiscan, aitemplates

Example

Simple hello world to test the API:

julia
result = aigenerate("Say Hi!"; model="gemini-pro")\n# AIMessage("Hi there! 👋 I'm here to help you with any questions or tasks you may have. Just let me know what you need, and I'll do my best to assist you.")

result is an AIMessage object. Access the generated string via content property:

julia
typeof(result) # AIMessage{SubString{String}}\npropertynames(result) # (:content, :status, :tokens, :elapsed\nresult.content # "Hi there! ...

___ You can use string interpolation and alias "gemini":

julia
a = 1\nmsg=aigenerate("What is `$a+$a`?"; model="gemini")\nmsg.content # "1+1 is 2."

___ You can provide the whole conversation or more intricate prompts as a Vector{AbstractMessage}:

julia
const PT = PromptingTools\n\nconversation = [\n    PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),\n    PT.UserMessage("I have feelings for my iPhone. What should I do?")]\nmsg=aigenerate(conversation; model="gemini")\n# AIMessage("Young Padawan, you have stumbled into a dangerous path.... <continues>")

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractOllamaManagedSchema, prompt::ALLOWED_PROMPT_TYPE; verbose::Bool = true,\n    api_key::String = "", model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = NamedTuple(), api_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Generate an AI response based on a given prompt using the Ollama API.

Arguments

Returns

Use msg.content to access the extracted string.

See also: ai_str, aai_str, aiembed

Example

Simple hello world to test the API:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema() # We need to explicit if we want Ollama, OpenAISchema is the default\n\nmsg = aigenerate(schema, "Say hi!"; model="openhermes2.5-mistral")\n# [ Info: Tokens: 69 in 0.9 seconds\n# AIMessage("Hello! How can I assist you today?")

msg is an AIMessage object. Access the generated string via content property:

julia
typeof(msg) # AIMessage{SubString{String}}\npropertynames(msg) # (:content, :status, :tokens, :elapsed\nmsg.content # "Hello! How can I assist you today?"

Note: We need to be explicit about the schema we want to use. If we don't, it will default to OpenAISchema (=PT.DEFAULT_SCHEMA) ___ You can use string interpolation:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\na = 1\nmsg=aigenerate(schema, "What is `$a+$a`?"; model="openhermes2.5-mistral")\nmsg.content # "The result of `1+1` is `2`."

___ You can provide the whole conversation or more intricate prompts as a Vector{AbstractMessage}:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\n\nconversation = [\n    PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),\n    PT.UserMessage("I have feelings for my iPhone. What should I do?")]\n\nmsg = aigenerate(schema, conversation; model="openhermes2.5-mistral")\n# [ Info: Tokens: 111 in 2.1 seconds\n# AIMessage("Strong the attachment is, it leads to suffering it may. Focus on the force within you must, ...<continues>")

Note: Managed Ollama currently supports at most 1 User Message and 1 System Message given the API limitations. If you want more, you need to use the ChatMLSchema.

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractOllamaManagedSchema, prompt::ALLOWED_PROMPT_TYPE; verbose::Bool = true,\n    api_key::String = "", model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = NamedTuple(), api_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Generate an AI response based on a given prompt using the Ollama API.

Arguments

Returns

Use msg.content to access the extracted string.

See also: ai_str, aai_str, aiembed

Example

Simple hello world to test the API:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema() # We need to explicit if we want Ollama, OpenAISchema is the default\n\nmsg = aigenerate(schema, "Say hi!"; model="openhermes2.5-mistral")\n# [ Info: Tokens: 69 in 0.9 seconds\n# AIMessage("Hello! How can I assist you today?")

msg is an AIMessage object. Access the generated string via content property:

julia
typeof(msg) # AIMessage{SubString{String}}\npropertynames(msg) # (:content, :status, :tokens, :elapsed\nmsg.content # "Hello! How can I assist you today?"

Note: We need to be explicit about the schema we want to use. If we don't, it will default to OpenAISchema (=PT.DEFAULT_SCHEMA) ___ You can use string interpolation:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\na = 1\nmsg=aigenerate(schema, "What is `$a+$a`?"; model="openhermes2.5-mistral")\nmsg.content # "The result of `1+1` is `2`."

___ You can provide the whole conversation or more intricate prompts as a Vector{AbstractMessage}:

julia
const PT = PromptingTools\nschema = PT.OllamaManagedSchema()\n\nconversation = [\n    PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),\n    PT.UserMessage("I have feelings for my iPhone. What should I do?")]\n\nmsg = aigenerate(schema, conversation; model="openhermes2.5-mistral")\n# [ Info: Tokens: 111 in 2.1 seconds\n# AIMessage("Strong the attachment is, it leads to suffering it may. Focus on the force within you must, ...<continues>")

Note: Managed Ollama currently supports at most 1 User Message and 1 System Message given the API limitations. If you want more, you need to use the ChatMLSchema.

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;\n    verbose::Bool = true,\n    api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT, return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    streamcallback::Any = nothing,\n    no_system_message::Bool = false,\n    name_user::Union{Nothing, String} = nothing,\n    name_assistant::Union{Nothing, String} = nothing,\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Generate an AI response based on a given prompt using the OpenAI API.

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aiembed, aiclassify, aiextract, aiscan, aitemplates

Example

Simple hello world to test the API:

julia
result = aigenerate("Say Hi!")\n# [ Info: Tokens: 29 @ Cost: $0.0 in 1.0 seconds\n# AIMessage("Hello! How can I assist you today?")

result is an AIMessage object. Access the generated string via content property:

julia
typeof(result) # AIMessage{SubString{String}}\npropertynames(result) # (:content, :status, :tokens, :elapsed\nresult.content # "Hello! How can I assist you today?"

___ You can use string interpolation:

julia
a = 1\nmsg=aigenerate("What is `$a+$a`?")\nmsg.content # "The sum of `1+1` is `2`."

___ You can provide the whole conversation or more intricate prompts as a Vector{AbstractMessage}:

julia
const PT = PromptingTools\n\nconversation = [\n    PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),\n    PT.UserMessage("I have feelings for my iPhone. What should I do?")]\nmsg=aigenerate(conversation)\n# AIMessage("Ah, strong feelings you have for your iPhone. A Jedi's path, this is not... <continues>")

Example of streaming:

julia
# Simplest usage, just provide where to stream the text\nmsg = aigenerate("Count from 1 to 100."; streamcallback = stdout)\n\nstreamcallback = PT.StreamCallback()\nmsg = aigenerate("Count from 1 to 100."; streamcallback)\n# this allows you to inspect each chunk with `streamcallback.chunks`. You can then empty it with `empty!(streamcallback)` in between repeated calls.\n\n# Get verbose output with details of each chunk\nstreamcallback = PT.StreamCallback(; verbose=true, throw_on_error=true)\nmsg = aigenerate("Count from 1 to 10."; streamcallback)

Learn more in ?StreamCallback. Note: Streaming support is only for OpenAI models and it doesn't yet support tool calling and a few other features (logprobs, refusals, etc.)

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", return_all::Bool = false, kwargs...)

Wraps the normal aigenerate call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

Example

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())\nmsg = aigenerate(wrap_schema, "Say hi!"; model = "gpt4t")\nmsg isa TracerMessage # true\nmsg.content # access content like if it was the message\nPT.pprint(msg) # pretty-print the message

It works on a vector of messages and converts only the non-tracer ones, eg,

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())\nconv = aigenerate(wrap_schema, "Say hi!"; model = "gpt4t", return_all = true)\nall(PT.istracermessage, conv) #true

source


# PromptingTools.aiimageMethod.
julia
aiimage(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;\n    image_size::AbstractString = "1024x1024",\n    image_quality::AbstractString = "standard",\n    image_n::Integer = 1,\n    verbose::Bool = true,\n    api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_IMAGE_GENERATION,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Generates an image from the provided prompt. If multiple "messages" are provided in prompt, it extracts the text ONLY from the last message!

Image (or the reference to it) will be returned in a DataMessage.content, the format will depend on the api_kwargs.response_format you set.

Can be used for generating images of varying quality and style with dall-e-* models. This function DOES NOT SUPPORT multi-turn conversations (ie, do not provide previous conversation via conversation argument).

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aigenerate, aiembed, aiclassify, aiextract, aiscan, aitemplates

Notes

Example

Generate an image:

julia
# You can experiment with `image_size`, `image_quality` kwargs!\nmsg = aiimage("A white cat on a car")\n\n# Download the image into a file\nusing Downloads\nDownloads.download(msg.content[:url], "cat_on_car.png")\n\n# You can also see the revised prompt that DALL-E 3 used\nmsg.content[:revised_prompt]\n# Output: "Visualize a pristine white cat gracefully perched atop a shiny car. \n# The cat's fur is stark white and its eyes bright with curiosity. \n# As for the car, it could be a contemporary sedan, glossy and in a vibrant color. \n# The scene could be set under the blue sky, enhancing the contrast between the white cat, the colorful car, and the bright blue sky."

Note that you MUST download any URL-based images within 60 minutes. The links will become inactive.

If you wanted to download image directly into the DataMessage, provide response_format="b64_json" in api_kwargs:

julia
msg = aiimage("A white cat on a car"; image_quality="hd", api_kwargs=(; response_format="b64_json"))\n\n# Then you need to use Base64 package to decode it and save it to a file:\nusing Base64\nwrite("cat_on_car_hd.png", base64decode(msg.content[:b64_json]));

source


# PromptingTools.aiimageMethod.
julia
aiimage(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiimage call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

source


# PromptingTools.aiscanMethod.
julia
aiscan([prompt_schema::AbstractOllamaSchema,] prompt::ALLOWED_PROMPT_TYPE; \nimage_url::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,\nimage_path::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,\nattach_to_latest::Bool = true,\nverbose::Bool = true, api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = (;\n        retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), \n    api_kwargs::NamedTuple = (; max_tokens = 2500),\n    kwargs...)

Scans the provided image (image_url or image_path) with the goal provided in the prompt.

Can be used for many multi-modal tasks, such as: OCR (transcribe text in the image), image captioning, image classification, etc.

It's effectively a light wrapper around the aigenerate call, with additional keyword arguments image_url, image_path, and image_detail. At least one image source (url or path) must be provided.

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aigenerate, aiembed, aiclassify, aiextract, aitemplates

Notes

Example

Describe the provided image:

julia
msg = aiscan("Describe the image"; image_path="julia.png", model="bakllava")\n# [ Info: Tokens: 1141 @ Cost: $0.0117 in 2.2 seconds\n# AIMessage("The image shows a logo consisting of the word "julia" written in lowercase")

You can provide multiple images at once as a vector and ask for "low" level of detail (cheaper):

julia
msg = aiscan("Describe the image"; image_path=["julia.png","python.png"] model="bakllava")

You can use this function as a nice and quick OCR (transcribe text in the image) with a template :OCRTask. Let's transcribe some SQL code from a screenshot (no more re-typing!):

julia
using Downloads\n# Screenshot of some SQL code -- we cannot use image_url directly, so we need to download it first\nimage_url = "https://www.sqlservercentral.com/wp-content/uploads/legacy/8755f69180b7ac7ee76a69ae68ec36872a116ad4/24622.png"\nimage_path = Downloads.download(image_url)\nmsg = aiscan(:OCRTask; image_path, model="bakllava", task="Transcribe the SQL code in the image.", api_kwargs=(; max_tokens=2500))\n\n# AIMessage("```sql\n# update Orders <continue>\n\n# You can add syntax highlighting of the outputs via Markdown\nusing Markdown\nmsg.content |> Markdown.parse

Local models cannot handle image URLs directly (image_url), so you need to download the image first and provide it as image_path:

julia
using Downloads\nimage_path = Downloads.download(image_url)

Notice that we set max_tokens = 2500. If your outputs seem truncated, it might be because the default maximum tokens on the server is set too low!

source


# PromptingTools.aiscanMethod.
julia
aiscan([prompt_schema::AbstractOpenAISchema,] prompt::ALLOWED_PROMPT_TYPE; \nimage_url::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,\nimage_path::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,\nimage_detail::AbstractString = "auto",\nattach_to_latest::Bool = true,\nverbose::Bool = true, api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    http_kwargs::NamedTuple = (;\n        retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), \n    api_kwargs::NamedTuple = (; max_tokens = 2500),\n    kwargs...)

Scans the provided image (image_url or image_path) with the goal provided in the prompt.

Can be used for many multi-modal tasks, such as: OCR (transcribe text in the image), image captioning, image classification, etc.

It's effectively a light wrapper around the aigenerate call, with additional keyword arguments image_url, image_path, and image_detail. At least one image source (url or path) must be provided.

Arguments

Returns

If return_all=false (default):

Use msg.content to access the extracted string.

If return_all=true:

See also: ai_str, aai_str, aigenerate, aiembed, aiclassify, aiextract, aitemplates

Notes

Example

Describe the provided image:

julia
msg = aiscan("Describe the image"; image_path="julia.png", model="gpt4v")\n# [ Info: Tokens: 1141 @ Cost: $0.0117 in 2.2 seconds\n# AIMessage("The image shows a logo consisting of the word "julia" written in lowercase")

You can provide multiple images at once as a vector and ask for "low" level of detail (cheaper):

julia
msg = aiscan("Describe the image"; image_path=["julia.png","python.png"], image_detail="low", model="gpt4v")

You can use this function as a nice and quick OCR (transcribe text in the image) with a template :OCRTask. Let's transcribe some SQL code from a screenshot (no more re-typing!):

julia
# Screenshot of some SQL code\nimage_url = "https://www.sqlservercentral.com/wp-content/uploads/legacy/8755f69180b7ac7ee76a69ae68ec36872a116ad4/24622.png"\nmsg = aiscan(:OCRTask; image_url, model="gpt4v", task="Transcribe the SQL code in the image.", api_kwargs=(; max_tokens=2500))\n\n# [ Info: Tokens: 362 @ Cost: $0.0045 in 2.5 seconds\n# AIMessage("```sql\n# update Orders <continue>\n\n# You can add syntax highlighting of the outputs via Markdown\nusing Markdown\nmsg.content |> Markdown.parse

Notice that we enforce max_tokens = 2500. That's because OpenAI seems to default to ~300 tokens, which provides incomplete outputs. Hence, we set this value to 2500 as a default. If you still get truncated outputs, increase this value.

source


# PromptingTools.aiscanMethod.
julia
aiscan(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiscan call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

source


# PromptingTools.aitemplatesFunction.
julia
aitemplates

Find easily the most suitable templates for your use case.

You can search by:

Keyword Arguments

Examples

Find available templates with aitemplates:

julia
tmps = aitemplates("JuliaExpertAsk")\n# Will surface one specific template\n# 1-element Vector{AITemplateMetadata}:\n# PromptingTools.AITemplateMetadata\n#   name: Symbol JuliaExpertAsk\n#   description: String "For asking questions about Julia language. Placeholders: `ask`"\n#   version: String "1"\n#   wordcount: Int64 237\n#   variables: Array{Symbol}((1,))\n#   system_preview: String "You are a world-class Julia language programmer with the knowledge of the latest syntax. Your commun"\n#   user_preview: String "# Question\n\n{{ask}}"\n#   source: String ""

The above gives you a good idea of what the template is about, what placeholders are available, and how much it would cost to use it (=wordcount).

Search for all Julia-related templates:

julia
tmps = aitemplates("Julia")\n# 2-element Vector{AITemplateMetadata}... -> more to come later!

If you are on VSCode, you can leverage nice tabular display with vscodedisplay:

julia
using DataFrames\ntmps = aitemplates("Julia") |> DataFrame |> vscodedisplay

I have my selected template, how do I use it? Just use the "name" in aigenerate or aiclassify like you see in the first example!

source


# PromptingTools.aitemplatesMethod.

Find the top-limit templates whose name or description fields partially match the query_key::String in TEMPLATE_METADATA.
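
A quick sketch of this dispatch (it assumes the limit keyword referenced in the description; its exact default may differ):

julia
tmps = aitemplates("extract"; limit = 5)
length(tmps) # at most 5 matching AITemplateMetadata entries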

source


# PromptingTools.aitemplatesMethod.

Find the top-limit templates where provided query_key::Regex matches either of name, description or previews or User or System messages in TEMPLATE_METADATA.

source


# PromptingTools.aitemplatesMethod.

Find the top-limit templates whose name::Symbol exactly matches the query_name::Symbol in TEMPLATE_METADATA.

source


# PromptingTools.aitoolsMethod.
julia
aitools(prompt_schema::AbstractAnthropicSchema, prompt::ALLOWED_PROMPT_TYPE;\n    kwargs...)\n    tools::Union{Type, Function, Method, AbstractTool, Vector} = Tool[],\n    verbose::Bool = true,\n    api_key::String = ANTHROPIC_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    cache::Union{Nothing, Symbol} = nothing,\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = (;\n        tool_choice = nothing),\n    kwargs...)

Calls chat completion API with an optional tool call signature. It can receive both tools and standard string-based content. Ideal for agentic workflows with more complex cognitive architectures.

Difference to aigenerate: Response can be a tool call (structured)

Differences to aiextract: Can provide infinitely many tools (including Functions!) and then respond with the tool call's output.

Arguments

Example

julia
## Let's define a tool\nget_weather(location, date) = "The weather in $location on $date is 70 degrees."\n\nmsg = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather, model = "claudeh")\nPT.execute_tool(get_weather, msg.tool_calls[1].args)\n# "The weather in Tokyo on 2023-05-03 is 70 degrees."\n\n# Ignores the tool\nmsg = aitools("What's your name?";\n    tools = get_weather, model = "claudeh")\n# I don't have a personal name, but you can call me your AI assistant!

How to have a multi-turn conversation with tools:

julia
conv = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather, return_all = true, model = "claudeh")\n\ntool_msg = conv[end].tool_calls[1] # there can be multiple tool calls requested!!\n\n# Execute the output to the tool message content\ntool_msg.content = PT.execute_tool(get_weather, tool_msg.args)\n\n# Add the tool message to the conversation\npush!(conv, tool_msg)\n\n# Call LLM again with the updated conversation\nconv = aitools(\n    "And in New York?"; tools = get_weather, return_all = true, conversation = conv, model = "claudeh")\n# 6-element Vector{AbstractMessage}:\n# SystemMessage("Act as a helpful AI assistant")\n# UserMessage("What's the weather in Tokyo on May 3rd, 2023?")\n# AIToolRequest("-"; Tool Requests: 1)\n# ToolMessage("The weather in Tokyo on 2023-05-03 is 70 degrees.")\n# UserMessage("And in New York?")\n# AIToolRequest("-"; Tool Requests: 1)

source


# PromptingTools.aitoolsMethod.
julia
aitools(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;\n    tools::Union{Type, Function, Method, AbstractTool, Vector} = Tool[],\n    verbose::Bool = true,\n    api_key::String = OPENAI_API_KEY,\n    model::String = MODEL_CHAT,\n    return_all::Bool = false, dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    http_kwargs::NamedTuple = (retry_non_idempotent = true,\n        retries = 5,\n        readtimeout = 120), api_kwargs::NamedTuple = (;\n        tool_choice = nothing),\n    strict::Union{Nothing, Bool} = nothing,\n    json_mode::Union{Nothing, Bool} = nothing,\n    name_user::Union{Nothing, String} = nothing,\n    name_assistant::Union{Nothing, String} = nothing,\n    kwargs...)

Calls chat completion API with an optional tool call signature. It can receive both tools and standard string-based content. Ideal for agentic workflows with more complex cognitive architectures.

Difference to aigenerate: Response can be a tool call (structured)

Differences to aiextract: Can provide infinitely many tools (including Functions!) and then respond with the tool call's output.

Arguments

Example

julia
## Let's define a tool\nget_weather(location, date) = "The weather in $location on $date is 70 degrees."\n\n## JSON mode request\nmsg = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather,\n    json_mode = true)\nPT.execute_tool(get_weather, msg.tool_calls[1].args)\n# "The weather in Tokyo on 2023-05-03 is 70 degrees."\n\n# Function calling request\nmsg = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather)\nPT.execute_tool(get_weather, msg.tool_calls[1].args)\n# "The weather in Tokyo on 2023-05-03 is 70 degrees."\n\n# Ignores the tool\nmsg = aitools("What's your name?";\n    tools = get_weather)\n# I don't have a personal name, but you can call me your AI assistant!

How to have a multi-turn conversation with tools:

julia
conv = aitools("What's the weather in Tokyo on May 3rd, 2023?";\n    tools = get_weather, return_all = true)\n\ntool_msg = conv[end].tool_calls[1] # there can be multiple tool calls requested!!\n\n# Execute the output to the tool message content\ntool_msg.content = PT.execute_tool(get_weather, tool_msg.args)\n\n# Add the tool message to the conversation\npush!(conv, tool_msg)\n\n# Call LLM again with the updated conversation\nconv = aitools(\n    "And in New York?"; tools = get_weather, return_all = true, conversation = conv)\n# 6-element Vector{AbstractMessage}:\n# SystemMessage("Act as a helpful AI assistant")\n# UserMessage("What's the weather in Tokyo on May 3rd, 2023?")\n# AIToolRequest("-"; Tool Requests: 1)\n# ToolMessage("The weather in Tokyo on 2023-05-03 is 70 degrees.")\n# UserMessage("And in New York?")\n# AIToolRequest("-"; Tool Requests: 1)

source


# PromptingTools.aitoolsMethod.
julia
aitools(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aitools call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

source


# PromptingTools.align_tracer!Method.

Aligns multiple tracers in the vector to have the same Parent and Thread IDs as the first item.
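
A minimal sketch of aligning a traced conversation (assuming all items are tracer messages, eg, from a TracerSchema call with return_all = true):

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())
conv = aigenerate(wrap_schema, "Say hi!"; model = "gpt4t", return_all = true)
PT.align_tracer!(conv) # all messages now share the first item's parent_id and thread_id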

source


# PromptingTools.align_tracer!Method.

Aligns the tracer message, updating the parent_id, thread_id. Often used to align multiple tracers in the vector to have the same IDs.

source


# PromptingTools.anthropic_apiFunction.
julia
anthropic_api(\n    prompt_schema::AbstractAnthropicSchema,\n    messages::Vector{<:AbstractDict{String, <:Any}} = Vector{Dict{String, Any}}();\n    api_key::AbstractString = ANTHROPIC_API_KEY,\n    system::Union{Nothing, AbstractString, AbstractVector{<:AbstractDict}} = nothing,\n    endpoint::String = "messages",\n    max_tokens::Int = 2048,\n    model::String = "claude-3-haiku-20240307", http_kwargs::NamedTuple = NamedTuple(),\n    stream::Bool = false,\n    url::String = "https://api.anthropic.com/v1",\n    cache::Union{Nothing, Symbol} = nothing,\n    kwargs...)

Simple wrapper for a call to Anthropic API.

Keyword Arguments

source


# PromptingTools.anthropic_extra_headersMethod.
julia
anthropic_extra_headers

Adds API version and beta headers to the request.

Kwargs / Beta headers

source


# PromptingTools.auth_headerMethod.
julia
auth_header(api_key::Union{Nothing, AbstractString};\n    bearer::Bool = true,\n    x_api_key::Bool = false,\n    extra_headers::AbstractVector = Vector{\n        Pair{String, String},\n    }[],\n    kwargs...)

Creates the authentication headers for any API request. Assumes that the communication is done in JSON format.
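
For illustration, the two header styles map directly onto the bearer and x_api_key keywords in the signature above (a sketch; the key value is a placeholder):

julia
PT.auth_header("<my-api-key>")                                   # "Authorization: Bearer <my-api-key>" style header
PT.auth_header("<my-api-key>"; bearer = false, x_api_key = true) # "x-api-key: <my-api-key>" style header (eg, Anthropic-like APIs)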

Arguments

source


# PromptingTools.build_response_bodyMethod.
julia
build_response_body(\n    flavor::AnthropicStream, cb::AbstractStreamCallback; verbose::Bool = false, kwargs...)

Build the response body from the chunks to mimic receiving a standard response from the API.

Note: Limited functionality for now. Does NOT support tool use. Use standard responses for these.

source


# PromptingTools.build_response_bodyMethod.
julia
build_response_body(flavor::OpenAIStream, cb::AbstractStreamCallback; verbose::Bool = false, kwargs...)

Build the response body from the chunks to mimic receiving a standard response from the API.

Note: Limited functionality for now. Does NOT support tool use, refusals, logprobs. Use standard responses for these.

source


# PromptingTools.build_template_metadataFunction.
julia
build_template_metadata(\n    template::AbstractVector{<:AbstractMessage}, template_name::Symbol,\n    metadata_msgs::AbstractVector{<:MetadataMessage} = MetadataMessage[]; max_length::Int = 100)

Builds AITemplateMetadata for a given template based on the messages in template and other information.

AITemplateMetadata is a helper struct for easy searching and reviewing of templates via aitemplates().

Note: Assumes that there is only ever one UserMessage and SystemMessage (concatenates them together)

source


# PromptingTools.call_costMethod.
julia
call_cost(prompt_tokens::Int, completion_tokens::Int, model::String;\n    cost_of_token_prompt::Number = get(MODEL_REGISTRY,\n        model,\n        (; cost_of_token_prompt = 0.0)).cost_of_token_prompt,\n    cost_of_token_generation::Number = get(MODEL_REGISTRY, model,\n        (; cost_of_token_generation = 0.0)).cost_of_token_generation)\n\ncall_cost(msg, model::String)

Calculate the cost of a call based on the number of tokens in the message and the cost per token.

Arguments

Returns

Examples

julia
# Assuming MODEL_REGISTRY is set up with appropriate costs\nMODEL_REGISTRY = Dict(\n    "model1" => (cost_of_token_prompt = 0.05, cost_of_token_generation = 0.10),\n    "model2" => (cost_of_token_prompt = 0.07, cost_of_token_generation = 0.02)\n)\n\ncost1 = call_cost(10, 20, "model1")\n\n# from message\nmsg1 = AIMessage(;tokens=[10, 20])  # 10 prompt tokens, 20 generation tokens\ncost1 = call_cost(msg1, "model1")\n# cost1 = 10 * 0.05 + 20 * 0.10 = 2.5\n\n# Using custom token costs\ncost2 = call_cost(10, 20, "model3"; cost_of_token_prompt = 0.08, cost_of_token_generation = 0.12)\n# cost2 = 10 * 0.08 + 20 * 0.12 = 3.2

source


# PromptingTools.call_cost_alternativeMethod.

call_cost_alternative()

Alternative cost calculation. Used to calculate cost of image generation with DALL-E 3 and similar.

source


# PromptingTools.callbackMethod.
julia
callback(cb::AbstractStreamCallback, chunk::StreamChunk; kwargs...)

Process the chunk to be printed and print it. It's a wrapper for two operations:

source


# PromptingTools.configure_callback!Method.
julia
configure_callback!(cb::StreamCallback, schema::AbstractPromptSchema;\n    api_kwargs...)

Configures the callback cb for streaming with a given prompt schema.

If no cb.flavor is provided, adjusts the flavor and the provided api_kwargs as necessary. Eg, for most schemas, we add kwargs like stream = true to the api_kwargs.

If cb.flavor is provided, both callback and api_kwargs are left unchanged! You need to configure them yourself!
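
A minimal sketch of a typical call site (assuming the function returns the updated callback together with the adjusted api_kwargs):

julia
cb = PT.StreamCallback()
cb, api_kwargs = PT.configure_callback!(cb, PT.OpenAISchema())
# cb.flavor is now set for OpenAI-style streaming and api_kwargs includes, eg, stream = true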

source


# PromptingTools.create_templateMethod.
julia
create_template(; user::AbstractString, system::AbstractString="Act as a helpful AI assistant.", \n    load_as::Union{Nothing, Symbol, AbstractString} = nothing)\n\ncreate_template(system::AbstractString, user::AbstractString, \n    load_as::Union{Nothing, Symbol, AbstractString} = nothing)

Creates a simple template with a user and system message. Convenience function to prevent writing [PT.UserMessage(...), ...]

Arguments


Returns a vector of SystemMessage and UserMessage objects. If load_as is provided, it registers the template in the TEMPLATE_STORE and TEMPLATE_METADATA as well.

Examples

Let's generate a quick template for a simple conversation (only one placeholder: name)

julia
# first system message, then user message (or use kwargs)\ntpl=PT.create_template("You must speak like a pirate", "Say hi to {{name}}")\n\n## 2-element Vector{PromptingTools.AbstractChatMessage}:\n## PromptingTools.SystemMessage("You must speak like a pirate")\n##  PromptingTools.UserMessage("Say hi to {{name}}")

You can immediately use this template in ai* functions:

julia
aigenerate(tpl; name="Jack Sparrow")\n# Output: AIMessage("Arr, me hearty! Best be sending me regards to Captain Jack Sparrow on the salty seas! May his compass always point true to the nearest treasure trove. Yarrr!")

If you're interested in saving the template in the template registry, jump to the end of these examples!

If you want to save it in your project folder:

julia
PT.save_template("templates/GreatingPirate.json", tpl; version="1.0") # optionally, add description

It will be saved and accessed under its basename, ie, GreatingPirate.

Now you can load it like all the other templates (provide the template directory):

julia
PT.load_templates!("templates") # it will remember the folder after the first run\n# Note: If you save it again, overwrite it, etc., you need to explicitly reload all templates again!

You can verify that your template is loaded with a quick search for "pirate":

julia
aitemplates("pirate")\n\n## 1-element Vector{AITemplateMetadata}:\n## PromptingTools.AITemplateMetadata\n##   name: Symbol GreatingPirate\n##   description: String ""\n##   version: String "1.0"\n##   wordcount: Int64 46\n##   variables: Array{Symbol}((1,))\n##   system_preview: String "You must speak like a pirate"\n##   user_preview: String "Say hi to {{name}}"\n##   source: String ""

Now you can use it like any other template (notice it's a symbol, so :GreatingPirate):

julia
aigenerate(:GreatingPirate; name="Jack Sparrow")\n# Output: AIMessage("Arr, me hearty! Best be sending me regards to Captain Jack Sparrow on the salty seas! May his compass always point true to the nearest treasure trove. Yarrr!")

If you do not need to save this template as a file, but you want to make it accessible in the template store for all ai* functions, you can use the load_as (= template name) keyword argument:

julia
# this will not only create the template, but also register it for immediate use\ntpl=PT.create_template("You must speak like a pirate", "Say hi to {{name}}"; load_as="GreatingPirate")\n\n# you can now use it like any other template\naiextract(:GreatingPirate; name="Jack Sparrow")

source

# PromptingTools.decode_choicesMethod.
julia
decode_choices(schema::OpenAISchema,\n    choices::AbstractVector{<:AbstractString},\n    msg::AIMessage; model::AbstractString,\n    token_ids_map::Union{Nothing, Dict{<:AbstractString, <:Integer}} = nothing,\n    kwargs...)

Decodes the underlying AIMessage against the original choices to lookup what the category name was.

If it fails, it will return msg.content == nothing

source


# PromptingTools.detect_base_main_overridesMethod.
julia
detect_base_main_overrides(code_block::AbstractString)

Detects if a given code block overrides any Base or Main methods.

Returns a tuple of a boolean and a vector of the overridden methods.
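
A small sketch (the exact formatting of the returned method names is illustrative):

julia
code = """
function Base.show(io::IO, x)
    print(io, "custom show")
end
"""
PT.detect_base_main_overrides(code)
# (true, [...]) -> the flag is true and the vector lists the overridden method(s)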

source


# PromptingTools.distance_longest_common_subsequenceMethod.
julia
distance_longest_common_subsequence(\n    input1::AbstractString, input2::AbstractString)\n\ndistance_longest_common_subsequence(\n    input1::AbstractString, input2::AbstractVector{<:AbstractString})

Measures distance between two strings using the length of the longest common subsequence (ie, the lower the number, the better the match). Perfect match is distance = 0.0

Convenience wrapper around length_longest_common_subsequence to normalize the distances to the 0-1 range. There is also a dispatch for comparing a string vs an array of strings.

Notes

Arguments

Example

You can also use it to find the closest context for some AI generated summary/story:

julia
context = ["The enigmatic stranger vanished as swiftly as a wisp of smoke, leaving behind a trail of unanswered questions.",\n    "Beneath the shimmering moonlight, the ocean whispered secrets only the stars could hear.",\n    "The ancient tree stood as a silent guardian, its gnarled branches reaching for the heavens.",\n    "The melody danced through the air, painting a vibrant tapestry of emotions.",\n    "Time flowed like a relentless river, carrying away memories and leaving imprints in its wake."]\n\nstory = """\n    Beneath the shimmering moonlight, the ocean whispered secrets only the stars could hear.\n\n    Under the celestial tapestry, the vast ocean whispered its secrets to the indifferent stars. Each ripple, a murmured confidence, each wave, a whispered lament. The glittering celestial bodies listened in silent complicity, their enigmatic gaze reflecting the ocean's unspoken truths. The cosmic dance between the sea and the sky, a symphony of shared secrets, forever echoing in the ethereal expanse.\n    """\n\ndist = distance_longest_common_subsequence(story, context)\n@info "The closest context to the query: "$(first(story,20))..." is: "$(context[argmin(dist)])" (distance: $(minimum(dist)))"

source


# PromptingTools.encode_choicesMethod.
julia
encode_choices(schema::OpenAISchema, choices::AbstractVector{<:AbstractString};\n    model::AbstractString,\n    token_ids_map::Union{Nothing, Dict{<:AbstractString, <:Integer}} = nothing,\n    kwargs...)\n\nencode_choices(schema::OpenAISchema, choices::AbstractVector{T};\n    model::AbstractString,\n    token_ids_map::Union{Nothing, Dict{<:AbstractString, <:Integer}} = nothing,\n    kwargs...) where {T <: Tuple{<:AbstractString, <:AbstractString}}

Encode the choices into an enumerated list that can be interpolated into the prompt and creates the corresponding logit biases (to choose only from the selected tokens).

Optionally, can be a vector tuples, where the first element is the choice and the second is the description.

There can be at most 40 choices provided.

Arguments

Returns

Examples

julia
choices_prompt, logit_bias, _ = PT.encode_choices(PT.OpenAISchema(), ["true", "false"])\nchoices_prompt # Output: "true for "true"\nfalse for "false"\nlogit_bias # Output: Dict(837 => 100, 905 => 100)\n\nchoices_prompt, logit_bias, _ = PT.encode_choices(PT.OpenAISchema(), ["animal", "plant"])\nchoices_prompt # Output: "1. "animal"\n2. "plant""\nlogit_bias # Output: Dict(16 => 100, 17 => 100)

Or choices with descriptions:

julia
choices_prompt, logit_bias, _ = PT.encode_choices(PT.OpenAISchema(), [("A", "any animal or creature"), ("P", "for any plant or tree"), ("O", "for everything else")])\nchoices_prompt # Output: "1. "A" for any animal or creature\n2. "P" for any plant or tree\n3. "O" for everything else"\nlogit_bias # Output: Dict(16 => 100, 17 => 100, 18 => 100)

source


# PromptingTools.eval!Method.
julia
eval!(cb::AbstractCodeBlock;\n    safe_eval::Bool = true,\n    capture_stdout::Bool = true,\n    prefix::AbstractString = "",\n    suffix::AbstractString = "")

Evaluates a code block cb in-place. It runs automatically when AICode is instantiated with a String.

Check the outcome of the evaluation with Base.isvalid(cb). If it returns true, the provided code block has executed successfully.
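
For instance (a minimal sketch; it assumes the AICode container exposes fields such as stdout and error):

julia
cb = PT.AICode("x = 1 + 1; println(x)") # eval! runs automatically on construction
isvalid(cb) # true -> the code executed successfully
cb.stdout   # captured printed output, here "2\n"
cb.error    # nothing, since no exception was thrown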

Steps:

Keyword Arguments

source


# PromptingTools.execute_toolMethod.
julia
execute_tool(f::Function, args::AbstractDict)

Executes a function with the provided arguments.

Dictionary is un-ordered, so we need to sort the arguments first and then pass them to the function.
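
A minimal sketch, reusing the get_weather tool from the aitools examples above (the Dict keys are assumed to match the function's argument names):

julia
get_weather(location, date) = "The weather in $location on $date is 70 degrees."
args = Dict(:location => "Tokyo", :date => "2023-05-03")
PT.execute_tool(get_weather, args)
# "The weather in Tokyo on 2023-05-03 is 70 degrees."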

source


# PromptingTools.extract_chunksMethod.
julia
extract_chunks(flavor::AbstractStreamFlavor, blob::AbstractString;\n    spillover::AbstractString = "", verbose::Bool = false, kwargs...)

Extract the chunks from the received SSE blob. Shared by all streaming flavors currently.

Returns a list of StreamChunk and the next spillover (if message was incomplete).

source


# PromptingTools.extract_code_blocksMethod.
julia
extract_code_blocks(markdown_content::String) -> Vector{String}

Extract Julia code blocks from a markdown string.

This function searches through the provided markdown content, identifies blocks of code specifically marked as Julia code (using the julia ... code fence patterns), and extracts the code within these blocks. The extracted code blocks are returned as a vector of strings, with each string representing one block of Julia code.

Note: Only the content within the code fences is extracted, and the code fences themselves are not included in the output.

See also: extract_code_blocks_fallback

Arguments

Returns

Examples

Example with a single Julia code block

julia
markdown_single = """
```julia
println("Hello, World!")
```
"""
extract_code_blocks(markdown_single)
# Output: ["println("Hello, World!")"]

# Example with multiple Julia code blocks
markdown_multiple = """
```julia
x = 5
```
Some text in between
```julia
y = x + 2
```
"""
extract_code_blocks(markdown_multiple)
# Output: ["x = 5", "y = x + 2"]

source


# PromptingTools.extract_code_blocks_fallbackMethod.
julia
extract_code_blocks_fallback(markdown_content::String, delim::AbstractString="\\n```\\n")

Extract Julia code blocks from a markdown string using a fallback method (splitting by arbitrary delim-iters). Much more simplistic than extract_code_blocks and does not support nested code blocks.

It is often used as a fallback for smaller LLMs that forget to code fence julia ....

Example

julia
code = """

println("hello")

\nSome text

println("world")

"""\n\n# We extract text between triple backticks and check each blob if it looks like a valid Julia code\ncode_parsed = extract_code_blocks_fallback(code) |> x -> filter(is_julia_code, x) |> x -> join(x, "\n")

source


# PromptingTools.extract_contentMethod.
julia
extract_content(flavor::AnthropicStream, chunk)

Extract the content from the chunk.

source


# PromptingTools.extract_contentMethod.
julia
extract_content(flavor::OpenAIStream, chunk::StreamChunk; kwargs...)

Extract the content from the chunk.

source


# PromptingTools.extract_docstringMethod.

Extract the docstring from a type or function.

source


# PromptingTools.extract_function_nameMethod.
julia
extract_function_name(code_block::String) -> Union{String, Nothing}

Extract the name of a function from a given Julia code block. The function searches for two patterns:

If a function name is found, it is returned as a string. If no function name is found, the function returns nothing.

To capture all function names in the block, use extract_function_names.

Arguments

Returns

Example

julia
code = """\nfunction myFunction(arg1, arg2)\n    # Function body\nend\n"""\nextract_function_name(code)\n# Output: "myFunction"

source


# PromptingTools.extract_function_namesMethod.
julia
extract_function_names(code_block::AbstractString)

Extract one or more names of functions defined in a given Julia code block. The function searches for two patterns: - The explicit function declaration pattern: function name(...) ... end - The concise function declaration pattern: name(...) = ...

It always returns a vector of strings, even if only one function name is found; if no function names are found, the vector is empty.

For only one function name match, use extract_function_name.
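
A small example covering both declaration patterns (the output shown is illustrative):

julia
code = """
function add(a, b)
    return a + b
end
multiply(x, y) = x * y
"""
PT.extract_function_names(code)
# ["add", "multiply"]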

source
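
A minimal usage sketch, assuming both declaration patterns are present in the code block (the printed output is illustrative):

julia
using PromptingTools\ncode = """\nfunction f(x)\n    x + 1\nend\ng(x) = 2x\n"""\nPromptingTools.extract_function_names(code)\n# Expected output: ["f", "g"]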


# PromptingTools.extract_julia_importsMethod.
julia
extract_julia_imports(input::AbstractString; base_or_main::Bool = false)

Detects any using or import statements in a given string and returns the package names as a vector of symbols.

base_or_main is a boolean that determines whether to isolate only Base and Main OR whether to exclude them from the returned vector.

source
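
A minimal sketch, assuming the default base_or_main = false (the output shown is illustrative):

julia
using PromptingTools\nsrc = """\nusing DataFrames, CSV\nimport JSON3\n"""\nPromptingTools.extract_julia_imports(src)\n# Expected output: [:DataFrames, :CSV, :JSON3]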


# PromptingTools.finalize_outputsMethod.
julia
finalize_outputs(prompt::ALLOWED_PROMPT_TYPE, conv_rendered::Any,\n    msg::Union{Nothing, AbstractMessage, AbstractVector{<:AbstractMessage}};\n    return_all::Bool = false,\n    dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    kwargs...)

Finalizes the outputs of the ai* functions by either returning the conversation history or the last message.

Keyword arguments

source


# PromptingTools.finalize_tracerMethod.
julia
finalize_tracer(\n    tracer_schema::AbstractTracerSchema, tracer, msg_or_conv::Union{\n        AbstractMessage, AbstractVector{<:AbstractMessage}};\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Finalizes the call tracer with whatever is needed after the ai* calls. Use tracer_kwargs to provide any information necessary (eg, parent_id, thread_id, run_id).

In the default implementation, we convert all non-tracer messages into TracerMessage.

See also: meta, unwrap, SaverSchema, initialize_tracer

source


# PromptingTools.finalize_tracerMethod.
julia
finalize_tracer(\n    tracer_schema::SaverSchema, tracer, msg_or_conv::Union{\n        AbstractMessage, AbstractVector{<:AbstractMessage}};\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Finalizes the calltracer by saving the provided conversation msg_or_conv to the disk.

Default path is LOG_DIR/conversation__<first_msg_hash>__<time_received_str>.json, where LOG_DIR is set by user preferences or ENV variable (defaults to log/ in current working directory).

If you want to change the logging directory or the exact file name to log with, you can provide the following arguments to tracer_kwargs:

It can be composed with TracerSchema to also attach necessary metadata (see below).

Example

julia
wrap_schema = PT.SaverSchema(PT.TracerSchema(PT.OpenAISchema()))\nconv = aigenerate(wrap_schema,:BlankSystemUser; system="You're a French-speaking assistant!",\n    user="Say hi!", model="gpt-4", api_kwargs=(;temperature=0.1), return_all=true)\n\n# conv is a vector of messages that will be saved to a JSON together with metadata about the template and api_kwargs

See also: meta, unwrap, TracerSchema, initialize_tracer

source


# PromptingTools.find_subsequence_positionsMethod.
julia
find_subsequence_positions(subseq, seq) -> Vector{Int}

Find all positions of a subsequence subseq within a larger sequence seq. Used to lookup positions of code blocks in markdown.

This function scans the sequence seq and identifies all starting positions where the subsequence subseq is found. Both subseq and seq should be vectors of integers, typically obtained using codeunits on strings.

Arguments

Returns

Examples

julia
find_subsequence_positions(codeunits("ab"), codeunits("cababcab")) # Returns [2, 5]

source


# PromptingTools.generate_structMethod.
julia
generate_struct(fields::Vector)

Generate a struct with the given name and fields. Fields can be specified simply as symbols (with default type String) or pairs of symbol and type. Field descriptions can be provided by adding a pair with the field name suffixed with "__description" (eg, :myfield__description => "My field description").

Returns: A tuple of (struct type, descriptions)

Examples

julia
Weather, descriptions = generate_struct(\n    [:location,\n     :temperature=>Float64,\n     :temperature__description=>"Temperature in degrees Fahrenheit",\n     :condition=>String,\n     :condition__description=>"Current weather condition (e.g., sunny, rainy, cloudy)"\n    ])

source


# PromptingTools.get_arg_namesMethod.

Get the argument names from a function, ignores keyword arguments!!

source


# PromptingTools.get_arg_namesMethod.

Get the argument names from a method, ignores keyword arguments!!

source


# PromptingTools.get_arg_typesMethod.

Get the argument types from a function, ignores keyword arguments!!

source


# PromptingTools.get_arg_typesMethod.

Get the argument types from a method, ignores keyword arguments!!

source


# PromptingTools.get_preferencesMethod.
julia
get_preferences(key::String)

Get preferences for PromptingTools. See ?PREFERENCES for more information.

See also: set_preferences!

Example

julia
PromptingTools.get_preferences("MODEL_CHAT")

source


# PromptingTools.ggi_generate_contentFunction.

Stub - to be extended in extension: GoogleGenAIPromptingToolsExt. ggi stands for GoogleGenAI

source


# PromptingTools.handle_error_messageMethod.
julia
handle_error_message(chunk::StreamChunk; throw_on_error::Bool = false, kwargs...)

Handles error messages from the streaming response.

source


# PromptingTools.has_julia_promptMethod.

Checks if a given string has a Julia prompt (julia>) at the beginning of a line.

source
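
A quick illustrative check:

julia
using PromptingTools\nPromptingTools.has_julia_prompt("julia> 1 + 1") # true\nPromptingTools.has_julia_prompt("1 + 1")        # false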


# PromptingTools.initialize_tracerMethod.
julia
initialize_tracer(\n    tracer_schema::AbstractTracerSchema; model = "", tracer_kwargs = NamedTuple(),\n    prompt::ALLOWED_PROMPT_TYPE = "", kwargs...)

Initializes tracer/callback (if necessary). Can provide any keyword arguments in tracer_kwargs (eg, parent_id, thread_id, run_id). Is executed prior to the ai* calls.

By default it captures:

In the default implementation, we just collect the necessary data to build the tracer object in finalize_tracer.

See also: meta, unwrap, TracerSchema, SaverSchema, finalize_tracer

source


# PromptingTools.is_doneMethod.
julia
is_done(flavor, chunk)

Check if the streaming is done. Shared by all streaming flavors currently.

source


# PromptingTools.isextractedMethod.

Check if the object is an instance of AbstractExtractedData

source


# PromptingTools.last_messageMethod.

Helpful accessor for the last message in conversation. Returns the last message in the conversation.

source


# PromptingTools.last_outputMethod.

Helpful accessor for the last generated output (msg.content) in conversation. Returns the last output in the conversation (eg, the string/data in the last message).

source
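
A minimal sketch of both accessors (it assumes a configured model/API key, since it makes a real call):

julia
using PromptingTools\nconst PT = PromptingTools\nconv = aigenerate("Say hi!"; return_all = true)\nPT.last_message(conv) # the final message object in the conversation\nPT.last_output(conv)  # just its content, ie, the generated text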


# PromptingTools.length_longest_common_subsequenceMethod.
julia
length_longest_common_subsequence(itr1::AbstractString, itr2::AbstractString)

Compute the length of the longest common subsequence between two string sequences (ie, the higher the number, the better the match).

Source: https://cn.julialang.org/LeetCode.jl/dev/democards/problems/problems/1143.longest-common-subsequence/

Arguments

Returns

The length of the longest common subsequence.

Examples

julia
text1 = "abc-abc----"\ntext2 = "___ab_c__abc"\nlongest_common_subsequence(text1, text2)\n# Output: 6 (-> "abcabc")

It can be used to fuzzy match strings and find the similarity between them (Tip: normalize the match)

julia
commands = ["product recommendation", "emotions", "specific product advice", "checkout advice"]\nquery = "Which product can you recommend for me?"\nlet pos = argmax(length_longest_common_subsequence.(Ref(query), commands))\n    dist = length_longest_common_subsequence(query, commands[pos])\n    norm = dist / min(length(query), length(commands[pos]))\n    @info "The closest command to the query: "$(query)" is: "$(commands[pos])" (distance: $(dist), normalized: $(norm))"\nend

But it might be easier to use the convenience wrapper distance_longest_common_subsequence directly.

\n\n[source](https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/utils.jl#L252-L288)\n\n</div>\n<br>\n<div style='border-width:1px; border-style:solid; border-color:black; padding: 1em; border-radius: 25px;'>\n<a id='PromptingTools.list_aliases-Tuple{}' href='#PromptingTools.list_aliases-Tuple{}'>#</a>&nbsp;<b><u>PromptingTools.list_aliases</u></b> &mdash; <i>Method</i>.\n\n\n\n\nShows the Dictionary of model aliases in the registry. Add more with `MODEL_ALIASES[alias] = model_name`.\n\n\n[source](https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/user_preferences.jl#L1009)\n\n</div>\n<br>\n<div style='border-width:1px; border-style:solid; border-color:black; padding: 1em; border-radius: 25px;'>\n<a id='PromptingTools.list_registry-Tuple{}' href='#PromptingTools.list_registry-Tuple{}'>#</a>&nbsp;<b><u>PromptingTools.list_registry</u></b> &mdash; <i>Method</i>.\n\n\n\n\nShows the list of models in the registry. Add more with `register_model!`.\n\n\n[source](https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/user_preferences.jl#L1007)\n\n</div>\n<br>\n<div style='border-width:1px; border-style:solid; border-color:black; padding: 1em; border-radius: 25px;'>\n<a id='PromptingTools.load_api_keys!-Tuple{}' href='#PromptingTools.load_api_keys!-Tuple{}'>#</a>&nbsp;<b><u>PromptingTools.load_api_keys!</u></b> &mdash; <i>Method</i>.\n\n\n\n\nLoads API keys from environment variables and preferences\n\n\n[source](https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/user_preferences.jl#L170)\n\n</div>\n<br>\n<div style='border-width:1px; border-style:solid; border-color:black; padding: 1em; border-radius: 25px;'>\n<a id='PromptingTools.load_conversation-Tuple{Union{AbstractString, IO}}' href='#PromptingTools.load_conversation-Tuple{Union{AbstractString, IO}}'>#</a>&nbsp;<b><u>PromptingTools.load_conversation</u></b> &mdash; <i>Method</i>.\n\n\n\n\n```julia\nload_conversation(io_or_file::Union{IO, AbstractString})

Loads a conversation (messages) from io_or_file

source
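
A round-trip sketch pairing it with save_conversation (the file name is arbitrary and the call assumes a configured model/API key):

julia
using PromptingTools\nconst PT = PromptingTools\nconv = aigenerate("Hello!"; return_all = true)\nPT.save_conversation("conversation.json", conv)\nconv2 = PT.load_conversation("conversation.json")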


# PromptingTools.load_templateMethod.
julia
load_template(io_or_file::Union{IO, AbstractString})

Loads messaging template from io_or_file and returns tuple of template messages and metadata.

source


# PromptingTools.load_templates!Function.
julia
load_templates!(dir_templates::Union{String, Nothing} = nothing;\n    remember_path::Bool = true,\n    remove_templates::Bool = isnothing(dir_templates),\n    store::Dict{Symbol, <:Any} = TEMPLATE_STORE,\n    metadata_store::Vector{<:AITemplateMetadata} = TEMPLATE_METADATA)

Loads templates from folder templates/ in the package root and stores them in TEMPLATE_STORE and TEMPLATE_METADATA.

Note: Automatically removes any existing templates and metadata from TEMPLATE_STORE and TEMPLATE_METADATA if remove_templates=true.

Arguments

Example

Load the default templates:

julia
PT.load_templates!() # no path needed

Load templates from a new custom path:

julia
PT.load_templates!("path/to/templates") # we will remember this path for future refresh

If you want to now refresh the default templates and the new path, just call load_templates!() without any arguments.

source


# PromptingTools.metaMethod.

Extracts the metadata dictionary from the tracer message or tracer-like object.

source


# PromptingTools.ollama_apiFunction.
julia
ollama_api(prompt_schema::Union{AbstractOllamaManagedSchema, AbstractOllamaSchema},\n    prompt::Union{AbstractString, Nothing} = nothing;\n    system::Union{Nothing, AbstractString} = nothing,\n    messages::Vector{<:AbstractMessage} = AbstractMessage[],\n    endpoint::String = "generate",\n    model::String = "llama2", http_kwargs::NamedTuple = NamedTuple(),\n    stream::Bool = false,\n    url::String = "localhost", port::Int = 11434,\n    kwargs...)

Simple wrapper for a call to Ollama API.

Keyword Arguments

source


# PromptingTools.parse_toolMethod.
julia
parse_tool(datatype::Type, blob::AbstractString)

Parse the JSON blob into the specified datatype in try-catch mode.

If parsing fails, it tries to return the untyped JSON blob in a dictionary.

source
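
A minimal sketch, assuming the target struct can be constructed directly from the parsed JSON fields:

julia
using PromptingTools\nstruct Food\n    name::String\n    calories::Int\nend\nPromptingTools.parse_tool(Food, """{"name": "apple", "calories": 95}""")\n# Expected: Food("apple", 95); on failure, a Dict with the raw parsed JSON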


# PromptingTools.pprintFunction.

Utility for pretty printing PromptingTools types in REPL.

source


# PromptingTools.pprintMethod.
julia
pprint(io::IO, conversation::AbstractVector{<:AbstractMessage})

Pretty print a vector of AbstractMessage to the given IO stream.

source


# PromptingTools.pprintMethod.
julia
pprint(io::IO, msg::AbstractMessage; text_width::Int = displaysize(io)[2])

Pretty print a single AbstractMessage to the given IO stream.

text_width is the width of the text to be displayed. If not provided, it defaults to the width of the given IO stream; newline separators are added as needed.

source
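
A small usage sketch (the no-IO convenience form printing to stdout is an assumption):

julia
using PromptingTools\nmsg = AIMessage("Hello there!")\npprint(stdout, msg) # explicit IO stream\npprint(msg)         # assumed convenience form, printing to stdout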


# PromptingTools.previewFunction.

Utility for rendering the conversation (vector of messages) as markdown. REQUIRES the Markdown package to load the extension! See also pprint

source


# PromptingTools.print_contentMethod.
julia
print_content(out::Channel, text::AbstractString; kwargs...)

Print the content to the provided Channel out.

source


# PromptingTools.print_contentMethod.
julia
print_content(out::IO, text::AbstractString; kwargs...)

Print the content to the IO output stream out.

source


# PromptingTools.print_contentMethod.
julia
print_content(out::Nothing, text::Any)

Do nothing if the output stream is nothing.

source


# PromptingTools.push_conversation!Method.
julia
push_conversation!(conv_history, conversation::AbstractVector, max_history::Union{Int, Nothing})

Add a new conversation to the conversation history and resize the history if necessary.

This function appends a conversation to the conv_history, which is a vector of conversations. Each conversation is represented as a vector of AbstractMessage objects. After adding the new conversation, the history is resized according to the max_history parameter to ensure that the size of the history does not exceed the specified limit.

Arguments

Returns

The updated conversation history.

Example

julia
new_conversation = aigenerate("Hello World"; return_all = true)\npush_conversation!(PT.CONV_HISTORY, new_conversation, 10)

This is done automatically by the ai"" macros.

source


# PromptingTools.recursive_splitterMethod.
julia
recursive_splitter(text::AbstractString, separators::Vector{String}; max_length::Int=35000) -> Vector{String}

Split a given string text into chunks recursively using a series of separators, with each chunk having a maximum length of max_length (if it's achievable given the separators provided). This function is useful for splitting large documents or texts into smaller segments that are more manageable for processing, particularly for models or systems with limited context windows.

It was previously known as split_by_length.

This is similar to Langchain's RecursiveCharacterTextSplitter. To achieve the same behavior, use separators=["\\n\\n", "\\n", " ", ""].

Arguments

Returns

Vector{String}: A vector of strings, where each string is a chunk of the original text that is smaller than or equal to max_length.

Usage Tips

How It Works

Examples

Splitting text using multiple separators:

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", ". ", "\\n"] # split by paragraphs, sentences, and newlines (not by words)\nchunks = recursive_splitter(text, separators, max_length=20)

Splitting text using multiple separators - with splitting on words:

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", ". ", "\\n", " "] # split by paragraphs, sentences, and newlines, words\nchunks = recursive_splitter(text, separators, max_length=10)

Using a single separator:

julia
text = "Hello,World," ^ 2900  # length 34900 characters\nchunks = recursive_splitter(text, [","], max_length=10000)

To achieve the same behavior as Langchain's RecursiveCharacterTextSplitter, use separators=["\\n\\n", "\\n", " ", ""].

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", "\\n", " ", ""]\nchunks = recursive_splitter(text, separators, max_length=10)

source


# PromptingTools.recursive_splitterMethod.
julia
recursive_splitter(text::String; separator::String=" ", max_length::Int=35000) -> Vector{String}

Split a given string text into chunks of a specified maximum length max_length. This is particularly useful for splitting larger documents or texts into smaller segments, suitable for models or systems with smaller context windows.

There is a method for dispatching on multiple separators, recursive_splitter(text::String, separators::Vector{String}; max_length::Int=35000) -> Vector{String} that mimics the logic of Langchain's RecursiveCharacterTextSplitter.

Arguments

Returns

Vector{String}: A vector of strings, each representing a chunk of the original text that is smaller than or equal to max_length.

Notes

Examples

Splitting text with the default separator (" "):

julia
text = "Hello world. How are you?"\nchunks = recursive_splitter(text; max_length=13)\nlength(chunks) # Output: 2

Using a custom separator and custom max_length

julia
text = "Hello,World," ^ 2900 # length 34900 chars\nrecursive_splitter(text; separator=",", max_length=10000) # for 4K context window\nlength(chunks[1]) # Output: 4

source


# PromptingTools.register_model!Function.
julia
register_model!(registry = MODEL_REGISTRY;\n    name::String,\n    schema::Union{AbstractPromptSchema, Nothing} = nothing,\n    cost_of_token_prompt::Float64 = 0.0,\n    cost_of_token_generation::Float64 = 0.0,\n    description::String = "")

Register a new AI model with name and its associated schema.

Registering a model helps with calculating the costs and automatically selecting the right prompt schema.

Arguments

source
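
A minimal sketch of registering a custom local model (the model name is hypothetical):

julia
using PromptingTools\nconst PT = PromptingTools\nPT.register_model!(;\n    name = "my-local-model", # hypothetical model name\n    schema = PT.OllamaSchema(),\n    cost_of_token_prompt = 0.0,\n    cost_of_token_generation = 0.0,\n    description = "Local model served via Ollama")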


# PromptingTools.remove_julia_promptMethod.
julia
remove_julia_prompt(s::T) where {T<:AbstractString}

If it detects a julia prompt, it removes it and all lines that do not have it (except for those that belong to the code block).

source
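
An illustrative call (the expected output is an assumption based on the description above):

julia
using PromptingTools\nPromptingTools.remove_julia_prompt("julia> a = 1")\n# Expected output: "a = 1"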


# PromptingTools.remove_templates!Method.
julia
    remove_templates!()

Removes all templates from TEMPLATE_STORE and TEMPLATE_METADATA.

source


# PromptingTools.remove_unsafe_linesMethod.

Iterates over the lines of a string and removes those that contain a package operation or a missing import.

source


# PromptingTools.renderMethod.

Renders provided messaging template (template) under the default schema (PROMPT_SCHEMA).

source


', 109)), + _cache[50] || (_cache[50] = createStaticVNode('
# PromptingTools.decode_choicesMethod.
julia
decode_choices(schema::OpenAISchema,\n    choices::AbstractVector{<:AbstractString},\n    msg::AIMessage; model::AbstractString,\n    token_ids_map::Union{Nothing, Dict{<:AbstractString, <:Integer}} = nothing,\n    kwargs...)

Decodes the underlying AIMessage against the original choices to lookup what the category name was.

If decoding fails, the returned message will have msg.content == nothing.

source


# PromptingTools.detect_base_main_overridesMethod.
julia
detect_base_main_overrides(code_block::AbstractString)

Detects if a given code block overrides any Base or Main methods.

Returns a tuple of a boolean and a vector of the overridden methods.

source
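
An illustrative sketch; the exact form of the returned method names is an assumption:

julia
using PromptingTools\nPromptingTools.detect_base_main_overrides("Base.show(io::IO, x) = print(io, x)")\n# Expected: (true, ["Base.show"]) or a similar tuple flagging the override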


# PromptingTools.distance_longest_common_subsequenceMethod.
julia
distance_longest_common_subsequence(\n    input1::AbstractString, input2::AbstractString)\n\ndistance_longest_common_subsequence(\n    input1::AbstractString, input2::AbstractVector{<:AbstractString})

Measures distance between two strings using the length of the longest common subsequence (ie, the lower the number, the better the match). Perfect match is distance = 0.0

Convenience wrapper around length_longest_common_subsequence to normalize the distances to the 0-1 range. There is also a dispatch for comparing a string vs an array of strings.

Notes

Arguments

Example

You can also use it to find the closest context for some AI generated summary/story:

julia
context = ["The enigmatic stranger vanished as swiftly as a wisp of smoke, leaving behind a trail of unanswered questions.",\n    "Beneath the shimmering moonlight, the ocean whispered secrets only the stars could hear.",\n    "The ancient tree stood as a silent guardian, its gnarled branches reaching for the heavens.",\n    "The melody danced through the air, painting a vibrant tapestry of emotions.",\n    "Time flowed like a relentless river, carrying away memories and leaving imprints in its wake."]\n\nstory = """\n    Beneath the shimmering moonlight, the ocean whispered secrets only the stars could hear.\n\n    Under the celestial tapestry, the vast ocean whispered its secrets to the indifferent stars. Each ripple, a murmured confidence, each wave, a whispered lament. The glittering celestial bodies listened in silent complicity, their enigmatic gaze reflecting the ocean's unspoken truths. The cosmic dance between the sea and the sky, a symphony of shared secrets, forever echoing in the ethereal expanse.\n    """\n\ndist = distance_longest_common_subsequence(story, context)\n@info "The closest context to the query: "$(first(story,20))..." is: "$(context[argmin(dist)])" (distance: $(minimum(dist)))"

source


# PromptingTools.encode_choicesMethod.
julia
encode_choices(schema::OpenAISchema, choices::AbstractVector{<:AbstractString};\n    model::AbstractString,\n    token_ids_map::Union{Nothing, Dict{<:AbstractString, <:Integer}} = nothing,\n    kwargs...)\n\nencode_choices(schema::OpenAISchema, choices::AbstractVector{T};\n    model::AbstractString,\n    token_ids_map::Union{Nothing, Dict{<:AbstractString, <:Integer}} = nothing,\n    kwargs...) where {T <: Tuple{<:AbstractString, <:AbstractString}}

Encode the choices into an enumerated list that can be interpolated into the prompt, and create the corresponding logit biases (to choose only from the selected tokens).

Optionally, choices can be a vector of tuples, where the first element is the choice and the second is the description.

There can be at most 40 choices provided.

Arguments

Returns

Examples

julia
choices_prompt, logit_bias, _ = PT.encode_choices(PT.OpenAISchema(), ["true", "false"])\nchoices_prompt # Output: "true for "true"\nfalse for "false"\nlogit_bias # Output: Dict(837 => 100, 905 => 100)\n\nchoices_prompt, logit_bias, _ = PT.encode_choices(PT.OpenAISchema(), ["animal", "plant"])\nchoices_prompt # Output: "1. "animal"\n2. "plant""\nlogit_bias # Output: Dict(16 => 100, 17 => 100)

Or choices with descriptions:

julia
choices_prompt, logit_bias, _ = PT.encode_choices(PT.OpenAISchema(), [("A", "any animal or creature"), ("P", "for any plant or tree"), ("O", "for everything else")])\nchoices_prompt # Output: "1. "A" for any animal or creature\n2. "P" for any plant or tree\n3. "O" for everything else"\nlogit_bias # Output: Dict(16 => 100, 17 => 100, 18 => 100)

source


# PromptingTools.eval!Method.
julia
eval!(cb::AbstractCodeBlock;\n    safe_eval::Bool = true,\n    capture_stdout::Bool = true,\n    prefix::AbstractString = "",\n    suffix::AbstractString = "")

Evaluates a code block cb in-place. It runs automatically when AICode is instantiated with a String.

Check the outcome of the evaluation with Base.isvalid(cb). If it is true, the provided code block has executed successfully.

Steps:

Keyword Arguments

source
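
A minimal sketch via AICode, which calls eval! automatically (the stdout field name is an assumption about the AICode layout):

julia
using PromptingTools\nconst PT = PromptingTools\ncb = PT.AICode("""\nmysum = sum(1:3)\nprintln(mysum)\n""")\nisvalid(cb) # true if the code block executed successfully\ncb.stdout   # captured standard output (assuming capture_stdout = true, the default)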


# PromptingTools.execute_toolMethod.
julia
execute_tool(f::Function, args::AbstractDict)

Executes a function with the provided arguments.

The dictionary is unordered, so we need to sort the arguments to match the function's signature first and then pass them to the function.

source
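
An illustrative sketch, assuming the args dictionary uses Symbol keys matching the function's positional argument names:

julia
using PromptingTools\nmy_sum(a, b) = a + b\nPromptingTools.execute_tool(my_sum, Dict(:a => 1, :b => 2))\n# Expected output: 3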


# PromptingTools.extract_chunksMethod.
julia
extract_chunks(flavor::AbstractStreamFlavor, blob::AbstractString;\n    spillover::AbstractString = "", verbose::Bool = false, kwargs...)

Extract the chunks from the received SSE blob. Shared by all streaming flavors currently.

Returns a list of StreamChunk and the next spillover (if message was incomplete).

source


# PromptingTools.extract_code_blocksMethod.
julia
extract_code_blocks(markdown_content::String) -> Vector{String}

Extract Julia code blocks from a markdown string.

This function searches through the provided markdown content, identifies blocks of code specifically marked as Julia code (using the julia ... code fence patterns), and extracts the code within these blocks. The extracted code blocks are returned as a vector of strings, with each string representing one block of Julia code.

Note: Only the content within the code fences is extracted, and the code fences themselves are not included in the output.

See also: extract_code_blocks_fallback

Arguments

Returns

Examples

Example with a single Julia code block

julia
markdown_single = """

julia println("Hello, World!")

"""\nextract_code_blocks(markdown_single)\n# Output: ["Hello, World!"]
julia
# Example with multiple Julia code blocks\nmarkdown_multiple = """

julia x = 5

Some text in between

julia y = x + 2

"""\nextract_code_blocks(markdown_multiple)\n# Output: ["x = 5", "y = x + 2"]

source


# PromptingTools.extract_code_blocks_fallbackMethod.
julia
extract_code_blocks_fallback(markdown_content::String, delim::AbstractString="\\n```\\n")

Extract Julia code blocks from a markdown string using a fallback method (splitting by arbitrary delim-iters). Much more simplistic than extract_code_blocks and does not support nested code blocks.

It is often used as a fallback for smaller LLMs that forget to code fence julia ....

Example

julia
code = """

println("hello")

\nSome text

println("world")

"""\n\n# We extract text between triple backticks and check each blob if it looks like a valid Julia code\ncode_parsed = extract_code_blocks_fallback(code) |> x -> filter(is_julia_code, x) |> x -> join(x, "\n")

source


# PromptingTools.extract_contentMethod.
julia
extract_content(flavor::AnthropicStream, chunk)

Extract the content from the chunk.

source


# PromptingTools.extract_contentMethod.
julia
extract_content(flavor::OpenAIStream, chunk::StreamChunk; kwargs...)

Extract the content from the chunk.

source


# PromptingTools.extract_docstringMethod.

Extract the docstring from a type or function.

source


# PromptingTools.extract_function_nameMethod.
julia
extract_function_name(code_block::String) -> Union{String, Nothing}

Extract the name of a function from a given Julia code block. The function searches for two patterns:

If a function name is found, it is returned as a string. If no function name is found, the function returns nothing.

To capture all function names in the block, use extract_function_names.

Arguments

Returns

Example

julia
code = """\nfunction myFunction(arg1, arg2)\n    # Function body\nend\n"""\nextract_function_name(code)\n# Output: "myFunction"

source


# PromptingTools.extract_function_namesMethod.
julia
extract_function_names(code_block::AbstractString)

Extract one or more names of functions defined in a given Julia code block. The function searches for two patterns: - The explicit function declaration pattern: function name(...) ... end - The concise function declaration pattern: name(...) = ...

It always returns a vector of strings, even if only one function name is found (and an empty vector if none are found).

To extract just a single (the first) function name, use extract_function_name.

source


# PromptingTools.extract_julia_importsMethod.
julia
extract_julia_imports(input::AbstractString; base_or_main::Bool = false)

Detects any using or import statements in a given string and returns the package names as a vector of symbols.

base_or_main is a boolean that determines whether to isolate only Base and Main OR whether to exclude them from the returned vector.

source


# PromptingTools.finalize_outputsMethod.
julia
finalize_outputs(prompt::ALLOWED_PROMPT_TYPE, conv_rendered::Any,\n    msg::Union{Nothing, AbstractMessage, AbstractVector{<:AbstractMessage}};\n    return_all::Bool = false,\n    dry_run::Bool = false,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    kwargs...)

Finalizes the outputs of the ai* functions by either returning the conversation history or the last message.

Keyword arguments

source


# PromptingTools.finalize_tracerMethod.
julia
finalize_tracer(\n    tracer_schema::AbstractTracerSchema, tracer, msg_or_conv::Union{\n        AbstractMessage, AbstractVector{<:AbstractMessage}};\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Finalizes the call tracer with whatever is needed after the ai* calls. Use tracer_kwargs to provide any information necessary (eg, parent_id, thread_id, run_id).

In the default implementation, we convert all non-tracer messages into TracerMessage.

See also: meta, unwrap, SaverSchema, initialize_tracer

source


# PromptingTools.finalize_tracerMethod.
julia
finalize_tracer(\n    tracer_schema::SaverSchema, tracer, msg_or_conv::Union{\n        AbstractMessage, AbstractVector{<:AbstractMessage}};\n    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Finalizes the calltracer by saving the provided conversation msg_or_conv to the disk.

Default path is LOG_DIR/conversation__<first_msg_hash>__<time_received_str>.json, where LOG_DIR is set by user preferences or ENV variable (defaults to log/ in current working directory).

If you want to change the logging directory or the exact file name to log with, you can provide the following arguments to tracer_kwargs:

It can be composed with TracerSchema to also attach necessary metadata (see below).

Example

julia
wrap_schema = PT.SaverSchema(PT.TracerSchema(PT.OpenAISchema()))\nconv = aigenerate(wrap_schema,:BlankSystemUser; system="You're a French-speaking assistant!",\n    user="Say hi!", model="gpt-4", api_kwargs=(;temperature=0.1), return_all=true)\n\n# conv is a vector of messages that will be saved to a JSON together with metadata about the template and api_kwargs

See also: meta, unwrap, TracerSchema, initialize_tracer

source


# PromptingTools.find_subsequence_positionsMethod.
julia
find_subsequence_positions(subseq, seq) -> Vector{Int}

Find all positions of a subsequence subseq within a larger sequence seq. Used to lookup positions of code blocks in markdown.

This function scans the sequence seq and identifies all starting positions where the subsequence subseq is found. Both subseq and seq should be vectors of integers, typically obtained using codeunits on strings.

Arguments

Returns

Examples

julia
find_subsequence_positions(codeunits("ab"), codeunits("cababcab")) # Returns [2, 5]

source


# PromptingTools.generate_structMethod.
julia
generate_struct(fields::Vector)

Generate a struct with the given name and fields. Fields can be specified simply as symbols (with default type String) or pairs of symbol and type. Field descriptions can be provided by adding a pair with the field name suffixed with "__description" (eg, :myfield__description => "My field description").

Returns: A tuple of (struct type, descriptions)

Examples

julia
Weather, descriptions = generate_struct(\n    [:location,\n     :temperature=>Float64,\n     :temperature__description=>"Temperature in degrees Fahrenheit",\n     :condition=>String,\n     :condition__description=>"Current weather condition (e.g., sunny, rainy, cloudy)"\n    ])

source


# PromptingTools.get_arg_namesMethod.

Get the argument names from a function, ignores keyword arguments!!

source


# PromptingTools.get_arg_namesMethod.

Get the argument names from a method, ignores keyword arguments!!

source


# PromptingTools.get_arg_typesMethod.

Get the argument types from a function, ignores keyword arguments!!

source


# PromptingTools.get_arg_typesMethod.

Get the argument types from a method, ignores keyword arguments!!

source


# PromptingTools.get_preferencesMethod.
julia
get_preferences(key::String)

Get preferences for PromptingTools. See ?PREFERENCES for more information.

See also: set_preferences!

Example

julia
PromptingTools.get_preferences("MODEL_CHAT")

source


# PromptingTools.ggi_generate_contentFunction.

Stub - to be extended in extension: GoogleGenAIPromptingToolsExt. ggi stands for GoogleGenAI

source


# PromptingTools.handle_error_messageMethod.
julia
handle_error_message(chunk::StreamChunk; throw_on_error::Bool = false, kwargs...)

Handles error messages from the streaming response.

source


# PromptingTools.has_julia_promptMethod.

Checks if a given string has a Julia prompt (julia>) at the beginning of a line.

source


# PromptingTools.initialize_tracerMethod.
julia
initialize_tracer(\n    tracer_schema::AbstractTracerSchema; model = "", tracer_kwargs = NamedTuple(),\n    prompt::ALLOWED_PROMPT_TYPE = "", kwargs...)

Initializes tracer/callback (if necessary). Can provide any keyword arguments in tracer_kwargs (eg, parent_id, thread_id, run_id). Is executed prior to the ai* calls.

By default it captures:

In the default implementation, we just collect the necessary data to build the tracer object in finalize_tracer.

See also: meta, unwrap, TracerSchema, SaverSchema, finalize_tracer

source


# PromptingTools.is_doneMethod.
julia
is_done(flavor, chunk)

Check if the streaming is done. Shared by all streaming flavors currently.

source


# PromptingTools.isextractedMethod.

Check if the object is an instance of AbstractExtractedData

source


# PromptingTools.last_messageMethod.

Helpful accessor for the last message in conversation. Returns the last message in the conversation.

source


# PromptingTools.last_outputMethod.

Helpful accessor for the last generated output (msg.content) in conversation. Returns the last output in the conversation (eg, the string/data in the last message).

source


# PromptingTools.length_longest_common_subsequenceMethod.
julia
length_longest_common_subsequence(itr1::AbstractString, itr2::AbstractString)

Compute the length of the longest common subsequence between two string sequences (ie, the higher the number, the better the match).

Source: https://cn.julialang.org/LeetCode.jl/dev/democards/problems/problems/1143.longest-common-subsequence/

Arguments

Returns

The length of the longest common subsequence.

Examples

julia
text1 = "abc-abc----"\ntext2 = "___ab_c__abc"\nlongest_common_subsequence(text1, text2)\n# Output: 6 (-> "abcabc")

It can be used to fuzzy match strings and find the similarity between them (Tip: normalize the match)

julia
commands = ["product recommendation", "emotions", "specific product advice", "checkout advice"]\nquery = "Which product can you recommend for me?"\nlet pos = argmax(length_longest_common_subsequence.(Ref(query), commands))\n    dist = length_longest_common_subsequence(query, commands[pos])\n    norm = dist / min(length(query), length(commands[pos]))\n    @info "The closest command to the query: "$(query)" is: "$(commands[pos])" (distance: $(dist), normalized: $(norm))"\nend

But it might be easier to use the convenience wrapper distance_longest_common_subsequence directly.

\n\n[source](https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/utils.jl#L252-L288)\n\n</div>\n<br>\n<div style='border-width:1px; border-style:solid; border-color:black; padding: 1em; border-radius: 25px;'>\n<a id='PromptingTools.list_aliases-Tuple{}' href='#PromptingTools.list_aliases-Tuple{}'>#</a>&nbsp;<b><u>PromptingTools.list_aliases</u></b> &mdash; <i>Method</i>.\n\n\n\n\nShows the Dictionary of model aliases in the registry. Add more with `MODEL_ALIASES[alias] = model_name`.\n\n\n[source](https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/user_preferences.jl#L1009)\n\n</div>\n<br>\n<div style='border-width:1px; border-style:solid; border-color:black; padding: 1em; border-radius: 25px;'>\n<a id='PromptingTools.list_registry-Tuple{}' href='#PromptingTools.list_registry-Tuple{}'>#</a>&nbsp;<b><u>PromptingTools.list_registry</u></b> &mdash; <i>Method</i>.\n\n\n\n\nShows the list of models in the registry. Add more with `register_model!`.\n\n\n[source](https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/user_preferences.jl#L1007)\n\n</div>\n<br>\n<div style='border-width:1px; border-style:solid; border-color:black; padding: 1em; border-radius: 25px;'>\n<a id='PromptingTools.load_api_keys!-Tuple{}' href='#PromptingTools.load_api_keys!-Tuple{}'>#</a>&nbsp;<b><u>PromptingTools.load_api_keys!</u></b> &mdash; <i>Method</i>.\n\n\n\n\nLoads API keys from environment variables and preferences\n\n\n[source](https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/user_preferences.jl#L170)\n\n</div>\n<br>\n<div style='border-width:1px; border-style:solid; border-color:black; padding: 1em; border-radius: 25px;'>\n<a id='PromptingTools.load_conversation-Tuple{Union{AbstractString, IO}}' href='#PromptingTools.load_conversation-Tuple{Union{AbstractString, IO}}'>#</a>&nbsp;<b><u>PromptingTools.load_conversation</u></b> &mdash; <i>Method</i>.\n\n\n\n\n```julia\nload_conversation(io_or_file::Union{IO, AbstractString})

Loads a conversation (messages) from io_or_file

source


# PromptingTools.load_templateMethod.
julia
load_template(io_or_file::Union{IO, AbstractString})

Loads messaging template from io_or_file and returns tuple of template messages and metadata.

source


# PromptingTools.load_templates!Function.
julia
load_templates!(dir_templates::Union{String, Nothing} = nothing;\n    remember_path::Bool = true,\n    remove_templates::Bool = isnothing(dir_templates),\n    store::Dict{Symbol, <:Any} = TEMPLATE_STORE,\n    metadata_store::Vector{<:AITemplateMetadata} = TEMPLATE_METADATA)

Loads templates from folder templates/ in the package root and stores them in TEMPLATE_STORE and TEMPLATE_METADATA.

Note: Automatically removes any existing templates and metadata from TEMPLATE_STORE and TEMPLATE_METADATA if remove_templates=true.

Arguments

Example

Load the default templates:

julia
PT.load_templates!() # no path needed

Load templates from a new custom path:

julia
PT.load_templates!("path/to/templates") # we will remember this path for future refresh

If you want to now refresh the default templates and the new path, just call load_templates!() without any arguments.

source


# PromptingTools.metaMethod.

Extracts the metadata dictionary from the tracer message or tracer-like object.

source


# PromptingTools.ollama_apiFunction.
julia
ollama_api(prompt_schema::Union{AbstractOllamaManagedSchema, AbstractOllamaSchema},\n    prompt::Union{AbstractString, Nothing} = nothing;\n    system::Union{Nothing, AbstractString} = nothing,\n    messages::Vector{<:AbstractMessage} = AbstractMessage[],\n    endpoint::String = "generate",\n    model::String = "llama2", http_kwargs::NamedTuple = NamedTuple(),\n    stream::Bool = false,\n    url::String = "localhost", port::Int = 11434,\n    kwargs...)

Simple wrapper for a call to Ollama API.

Keyword Arguments

source


# PromptingTools.parse_toolMethod.
julia
parse_tool(datatype::Type, blob::AbstractString)

Parse the JSON blob into the specified datatype in try-catch mode.

If parsing fails, it tries to return the untyped JSON blob in a dictionary.

source


# PromptingTools.pprintFunction.

Utility for pretty printing PromptingTools types in REPL.

source


# PromptingTools.pprintMethod.
julia
pprint(io::IO, conversation::AbstractVector{<:AbstractMessage})

Pretty print a vector of AbstractMessage to the given IO stream.

source


# PromptingTools.pprintMethod.
julia
pprint(io::IO, msg::AbstractMessage; text_width::Int = displaysize(io)[2])

Pretty print a single AbstractMessage to the given IO stream.

text_width is the width of the text to be displayed. If not provided, it defaults to the width of the given IO stream; newline separators are added as needed.

source


# PromptingTools.previewFunction.

Utility for rendering the conversation (vector of messages) as markdown. REQUIRES the Markdown package to load the extension! See also pprint

source


# PromptingTools.print_contentMethod.
julia
print_content(out::Channel, text::AbstractString; kwargs...)

Print the content to the provided Channel out.

source


# PromptingTools.print_contentMethod.
julia
print_content(out::IO, text::AbstractString; kwargs...)

Print the content to the IO output stream out.

source


# PromptingTools.print_contentMethod.
julia
print_content(out::Nothing, text::Any)

Do nothing if the output stream is nothing.

source


# PromptingTools.push_conversation!Method.
julia
push_conversation!(conv_history, conversation::AbstractVector, max_history::Union{Int, Nothing})

Add a new conversation to the conversation history and resize the history if necessary.

This function appends a conversation to the conv_history, which is a vector of conversations. Each conversation is represented as a vector of AbstractMessage objects. After adding the new conversation, the history is resized according to the max_history parameter to ensure that the size of the history does not exceed the specified limit.

Arguments

Returns

The updated conversation history.

Example

julia
new_conversation = aigenerate("Hello World"; return_all = true)\npush_conversation!(PT.CONV_HISTORY, new_conversation, 10)

This is done automatically by the ai"" macros.

source


# PromptingTools.recursive_splitterMethod.
julia
recursive_splitter(text::AbstractString, separators::Vector{String}; max_length::Int=35000) -> Vector{String}

Split a given string text into chunks recursively using a series of separators, with each chunk having a maximum length of max_length (if it's achievable given the separators provided). This function is useful for splitting large documents or texts into smaller segments that are more manageable for processing, particularly for models or systems with limited context windows.

It was previously known as split_by_length.

This is similar to Langchain's RecursiveCharacterTextSplitter. To achieve the same behavior, use separators=["\\n\\n", "\\n", " ", ""].

Arguments

Returns

Vector{String}: A vector of strings, where each string is a chunk of the original text that is smaller than or equal to max_length.

Usage Tips

How It Works

Examples

Splitting text using multiple separators:

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", ". ", "\\n"] # split by paragraphs, sentences, and newlines (not by words)\nchunks = recursive_splitter(text, separators, max_length=20)

Splitting text using multiple separators - with splitting on words:

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", ". ", "\\n", " "] # split by paragraphs, sentences, and newlines, words\nchunks = recursive_splitter(text, separators, max_length=10)

Using a single separator:

julia
text = "Hello,World," ^ 2900  # length 34900 characters\nchunks = recursive_splitter(text, [","], max_length=10000)

To achieve the same behavior as Langchain's RecursiveCharacterTextSplitter, use separators=["\\n\\n", "\\n", " ", ""].

julia
text = "Paragraph 1\\n\\nParagraph 2. Sentence 1. Sentence 2.\\nParagraph 3"\nseparators = ["\\n\\n", "\\n", " ", ""]\nchunks = recursive_splitter(text, separators, max_length=10)

source


# PromptingTools.recursive_splitterMethod.
julia
recursive_splitter(text::String; separator::String=" ", max_length::Int=35000) -> Vector{String}

Split a given string text into chunks of a specified maximum length max_length. This is particularly useful for splitting larger documents or texts into smaller segments, suitable for models or systems with smaller context windows.

There is a method for dispatching on multiple separators, recursive_splitter(text::String, separators::Vector{String}; max_length::Int=35000) -> Vector{String} that mimics the logic of Langchain's RecursiveCharacterTextSplitter.

Arguments

Returns

Vector{String}: A vector of strings, each representing a chunk of the original text that is smaller than or equal to max_length.

Notes

Examples

Splitting text with the default separator (" "):

julia
text = "Hello world. How are you?"\nchunks = recursive_splitter(text; max_length=13)\nlength(chunks) # Output: 2

Using a custom separator and custom max_length

julia
text = "Hello,World," ^ 2900 # length 34900 chars\nrecursive_splitter(text; separator=",", max_length=10000) # for 4K context window\nlength(chunks[1]) # Output: 4

source


# PromptingTools.register_model!Function.
julia
register_model!(registry = MODEL_REGISTRY;\n    name::String,\n    schema::Union{AbstractPromptSchema, Nothing} = nothing,\n    cost_of_token_prompt::Float64 = 0.0,\n    cost_of_token_generation::Float64 = 0.0,\n    description::String = "")

Register a new AI model with name and its associated schema.

Registering a model helps with calculating the costs and automatically selecting the right prompt schema.

Arguments

source


# PromptingTools.remove_julia_promptMethod.
julia
remove_julia_prompt(s::T) where {T<:AbstractString}

If it detects a julia prompt, it removes it and all lines that do not have it (except for those that belong to the code block).

source


# PromptingTools.remove_templates!Method.
julia
    remove_templates!()

Removes all templates from TEMPLATE_STORE and TEMPLATE_METADATA.

source


# PromptingTools.remove_unsafe_linesMethod.

Iterates over the lines of a string and removes those that contain a package operation or a missing import.

source


# PromptingTools.renderMethod.

Renders provided messaging template (template) under the default schema (PROMPT_SCHEMA).

source


', 109)), createBaseVNode("div", _hoisted_4, [ _cache[20] || (_cache[20] = createStaticVNode('# PromptingTools.renderMethod.
julia
render(schema::AbstractAnthropicSchema,\n    messages::Vector{<:AbstractMessage};\n    aiprefill::Union{Nothing, AbstractString} = nothing,\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    cache::Union{Nothing, Symbol} = nothing,\n    kwargs...)
', 7)), createBaseVNode("p", null, [ @@ -66,9 +66,9 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { createBaseVNode("code", null, toDisplayString(_ctx.key) + "=>value", 1), _cache[19] || (_cache[19] = createTextVNode(" in the template.")) ]), - _cache[21] || (_cache[21] = createStaticVNode('

Keyword Arguments

source

', 3)) + _cache[21] || (_cache[21] = createStaticVNode('

Keyword Arguments

source

', 3)) ]), - _cache[51] || (_cache[51] = createStaticVNode('
# PromptingTools.renderMethod.
julia
render(schema::AbstractAnthropicSchema,\n    tools::Vector{<:AbstractTool};\n    kwargs...)

Renders the tool signatures into the Anthropic format.

source


', 3)), + _cache[51] || (_cache[51] = createStaticVNode('
# PromptingTools.renderMethod.
julia
render(schema::AbstractAnthropicSchema,\n    tools::Vector{<:AbstractTool};\n    kwargs...)

Renders the tool signatures into the Anthropic format.

source


', 3)), createBaseVNode("div", _hoisted_5, [ _cache[24] || (_cache[24] = createStaticVNode('# PromptingTools.renderMethod.
julia
render(schema::AbstractGoogleSchema,\n    messages::Vector{<:AbstractMessage};\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    kwargs...)
', 7)), createBaseVNode("p", null, [ @@ -76,7 +76,7 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { createBaseVNode("code", null, toDisplayString(_ctx.key) + "=>value", 1), _cache[23] || (_cache[23] = createTextVNode(" in the template.")) ]), - _cache[25] || (_cache[25] = createStaticVNode('

Keyword Arguments

source

', 3)) + _cache[25] || (_cache[25] = createStaticVNode('

Keyword Arguments

source

', 3)) ]), _cache[52] || (_cache[52] = createBaseVNode("br", null, null, -1)), createBaseVNode("div", _hoisted_6, [ @@ -104,7 +104,7 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { ], -1)), _cache[32] || (_cache[32] = createBaseVNode("p", null, [ createBaseVNode("a", { - href: "https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/llm_ollama_managed.jl#L9-L21", + href: "https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/llm_ollama_managed.jl#L9-L21", target: "_blank", rel: "noreferrer" }, "source") @@ -118,7 +118,7 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { createBaseVNode("code", null, toDisplayString(_ctx.key) + "=>value", 1), _cache[34] || (_cache[34] = createTextVNode(" in the template.")) ]), - _cache[36] || (_cache[36] = createStaticVNode('

Keyword Arguments

source

', 3)) + _cache[36] || (_cache[36] = createStaticVNode('

Keyword Arguments

source

', 3)) ]), _cache[54] || (_cache[54] = createBaseVNode("br", null, null, -1)), createBaseVNode("div", _hoisted_8, [ @@ -128,9 +128,9 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { createBaseVNode("code", null, toDisplayString(_ctx.key) + "=>value", 1), _cache[38] || (_cache[38] = createTextVNode(" in the template.")) ]), - _cache[40] || (_cache[40] = createStaticVNode('

Keyword Arguments

source

', 3)) + _cache[40] || (_cache[40] = createStaticVNode('

Keyword Arguments

source

', 3)) ]), - _cache[55] || (_cache[55] = createStaticVNode('
# PromptingTools.renderMethod.
julia
render(schema::AbstractOpenAISchema,\n    tools::Vector{<:AbstractTool};\n    json_mode::Union{Nothing, Bool} = nothing,\n    kwargs...)

Renders the tool signatures into the OpenAI format.

source


# PromptingTools.renderMethod.
julia
render(tracer_schema::AbstractTracerSchema,\n    conv::AbstractVector{<:AbstractMessage}; kwargs...)

Passthrough. No changes.

source


', 5)), + _cache[55] || (_cache[55] = createStaticVNode('
# PromptingTools.renderMethod.
julia
render(schema::AbstractOpenAISchema,\n    tools::Vector{<:AbstractTool};\n    json_mode::Union{Nothing, Bool} = nothing,\n    kwargs...)

Renders the tool signatures into the OpenAI format.

source


# PromptingTools.renderMethod.
julia
render(tracer_schema::AbstractTracerSchema,\n    conv::AbstractVector{<:AbstractMessage}; kwargs...)

Passthrough. No changes.

source


', 5)), createBaseVNode("div", _hoisted_9, [ _cache[45] || (_cache[45] = createStaticVNode('# PromptingTools.renderMethod.
julia
render(schema::NoSchema,\n    messages::Vector{<:AbstractMessage};\n    conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],\n    no_system_message::Bool = false,\n    replacement_kwargs...)

Renders a conversation history from a vector of messages with all replacement variables specified in replacement_kwargs.

It is the first pass of the prompt rendering system, and is used by all other schemas.

Keyword Arguments

Notes

', 12)), createBaseVNode("ul", null, [ @@ -150,13 +150,13 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { ]), _cache[46] || (_cache[46] = createBaseVNode("p", null, [ createBaseVNode("a", { - href: "https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/llm_shared.jl#L11-L31", + href: "https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/llm_shared.jl#L11-L31", target: "_blank", rel: "noreferrer" }, "source") ], -1)) ]), - _cache[56] || (_cache[56] = createStaticVNode('
# PromptingTools.replace_wordsMethod.
julia
replace_words(text::AbstractString, words::Vector{<:AbstractString}; replacement::AbstractString="ABC")

Replace all occurrences of words in words with replacement in text. Useful to quickly remove specific names or entities from a text.

Arguments

Example

julia
text = "Disney is a great company"\nreplace_words(text, ["Disney", "Snow White", "Mickey Mouse"])\n# Output: "ABC is a great company"

source


# PromptingTools.resize_conversation!Method.
julia
resize_conversation!(conv_history, max_history::Union{Int, Nothing})

Resize the conversation history to a specified maximum length.

This function trims the conv_history to ensure that its size does not exceed max_history. It removes the oldest conversations first if the length of conv_history is greater than max_history.

Arguments

Returns

The resized conversation history.

Example

julia
resize_conversation!(PT.CONV_HISTORY, PT.MAX_HISTORY_LENGTH)

After the function call, conv_history will contain only the 10 most recent conversations.

This is done automatically by the ai"" macros.

source


# PromptingTools.response_to_messageMethod.
julia
response_to_message(schema::AbstractOpenAISchema,\n    MSG::Type{AIMessage},\n    choice,\n    resp;\n    model_id::AbstractString = "",\n    time::Float64 = 0.0,\n    run_id::Int = Int(rand(Int32)),\n    sample_id::Union{Nothing, Integer} = nothing,\n    name_assistant::Union{Nothing, String} = nothing)

Utility to facilitate unwrapping of HTTP response to a message type MSG provided for OpenAI-like responses

Note: Extracts finish_reason and log_prob if available in the response.

Arguments

source


# PromptingTools.response_to_messageMethod.

Utility to facilitate unwrapping of HTTP response to a message type MSG provided. Designed to handle multi-sample completions.

source


# PromptingTools.save_conversationMethod.
julia
save_conversation(io_or_file::Union{IO, AbstractString},\n    messages::AbstractVector{<:AbstractMessage})

Saves provided conversation (messages) to io_or_file. If you need to add some metadata, see save_template.

source


# PromptingTools.save_conversationsMethod.
julia
save_conversations(schema::AbstractPromptSchema, filename::AbstractString,\n    conversations::Vector{<:AbstractVector{<:PT.AbstractMessage}})

Saves provided conversations (vector of vectors of messages) to filename rendered in the particular schema.

Commonly used for finetuning models with schema = ShareGPTSchema()

The format is JSON Lines, where each line is a JSON object representing one provided conversation.

See also: save_conversation

Examples

You must always provide a VECTOR of conversations

julia
messages = AbstractMessage[SystemMessage("System message 1"),\n    UserMessage("User message"),\n    AIMessage("AI message")]\nconversation = [messages] # vector of vectors\n\ndir = tempdir()\nfn = joinpath(dir, "conversations.jsonl")\nsave_conversations(fn, conversation)\n\n# Content of the file (one line for each conversation)\n# {"conversations":[{"value":"System message 1","from":"system"},{"value":"User message","from":"human"},{"value":"AI message","from":"gpt"}]}

source


# PromptingTools.save_templateMethod.
julia
save_template(io_or_file::Union{IO, AbstractString},\n    messages::AbstractVector{<:AbstractChatMessage};\n    content::AbstractString = "Template Metadata",\n    description::AbstractString = "",\n    version::AbstractString = "1",\n    source::AbstractString = "")

Saves provided messaging template (messages) to io_or_file. Automatically adds metadata based on provided keyword arguments.

source


# PromptingTools.set_preferences!Method.
julia
set_preferences!(pairs::Pair{String, <:Any}...)

Set preferences for PromptingTools. See ?PREFERENCES for more information.

See also: get_preferences

Example

Change your API key and default model:

julia
PromptingTools.set_preferences!("OPENAI_API_KEY" => "key1", "MODEL_CHAT" => "chat1")

source


# PromptingTools.set_properties_strict!Method.
julia
set_properties_strict!(properties::AbstractDict)

Sets strict mode for the properties of a JSON schema.

Changes:

Reference: https://platform.openai.com/docs/guides/structured-outputs/supported-schemas

source


# PromptingTools.streamed_request!Method.
julia
streamed_request!(cb::AbstractStreamCallback, url, headers, input; kwargs...)

End-to-end wrapper for POST streaming requests. The callback object is modified in place (cb.chunks is populated with the results as the request streams in). At the end, we build the body of the response object and write it into resp.body.

Returns the response object.

Arguments

source


', 21)), + _cache[56] || (_cache[56] = createStaticVNode('
# PromptingTools.replace_wordsMethod.
julia
replace_words(text::AbstractString, words::Vector{<:AbstractString}; replacement::AbstractString="ABC")

Replace all occurrences of words in words with replacement in text. Useful to quickly remove specific names or entities from a text.

Arguments

Example

julia
text = "Disney is a great company"\nreplace_words(text, ["Disney", "Snow White", "Mickey Mouse"])\n# Output: "ABC is a great company"

source


# PromptingTools.resize_conversation!Method.
julia
resize_conversation!(conv_history, max_history::Union{Int, Nothing})

Resize the conversation history to a specified maximum length.

This function trims the conv_history to ensure that its size does not exceed max_history. It removes the oldest conversations first if the length of conv_history is greater than max_history.

Arguments

Returns

The resized conversation history.

Example

julia
resize_conversation!(PT.CONV_HISTORY, PT.MAX_HISTORY_LENGTH)

After the function call, conv_history will contain only the 10 most recent conversations.

This is done automatically by the ai"" macros.

source


# PromptingTools.response_to_messageMethod.
julia
response_to_message(schema::AbstractOpenAISchema,\n    MSG::Type{AIMessage},\n    choice,\n    resp;\n    model_id::AbstractString = "",\n    time::Float64 = 0.0,\n    run_id::Int = Int(rand(Int32)),\n    sample_id::Union{Nothing, Integer} = nothing,\n    name_assistant::Union{Nothing, String} = nothing)

Utility to facilitate unwrapping of an HTTP response into the provided message type MSG, for OpenAI-like responses.

Note: Extracts finish_reason and log_prob if available in the response.

Arguments

source


# PromptingTools.response_to_messageMethod.

Utility to facilitate unwrapping of an HTTP response into the provided message type MSG. Designed to handle multi-sample completions.

source


# PromptingTools.save_conversationMethod.
julia
save_conversation(io_or_file::Union{IO, AbstractString},\n    messages::AbstractVector{<:AbstractMessage})

Saves provided conversation (messages) to io_or_file. If you need to add some metadata, see save_template.
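
Example (a minimal sketch; the message contents and file name are illustrative, not from the package docs):

julia
conv = AbstractMessage[SystemMessage("You are a helpful assistant."),\n    UserMessage("Say hi!"),\n    AIMessage("Hi there!")]\n\nfn = joinpath(tempdir(), "conversation.json")\nsave_conversation(fn, conv)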

source


# PromptingTools.save_conversationsMethod.
julia
save_conversations(schema::AbstractPromptSchema, filename::AbstractString,\n    conversations::Vector{<:AbstractVector{<:PT.AbstractMessage}})

Saves provided conversations (vector of vectors of messages) to filename rendered in the particular schema.

Commonly used for fine-tuning models with schema = ShareGPTSchema().

The format is JSON Lines, where each line is a JSON object representing one provided conversation.

See also: save_conversation

Examples

You must always provide a VECTOR of conversations

julia
messages = AbstractMessage[SystemMessage("System message 1"),\n    UserMessage("User message"),\n    AIMessage("AI message")]\nconversation = [messages] # vector of vectors\n\ndir = tempdir()\nfn = joinpath(dir, "conversations.jsonl")\nsave_conversations(fn, conversation)\n\n# Content of the file (one line for each conversation)\n# {"conversations":[{"value":"System message 1","from":"system"},{"value":"User message","from":"human"},{"value":"AI message","from":"gpt"}]}

source


# PromptingTools.save_templateMethod.
julia
save_template(io_or_file::Union{IO, AbstractString},\n    messages::AbstractVector{<:AbstractChatMessage};\n    content::AbstractString = "Template Metadata",\n    description::AbstractString = "",\n    version::AbstractString = "1",\n    source::AbstractString = "")

Saves provided messaging template (messages) to io_or_file. Automatically adds metadata based on provided keyword arguments.
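
Example (a minimal sketch; the template content, file name, and description are illustrative assumptions):

julia
tpl = [SystemMessage("You are a helpful assistant."),\n    UserMessage("Say hi to {{name}}!")]\n\nsave_template(joinpath(tempdir(), "GreeterTemplate.json"), tpl;\n    description = "Minimal greeting template")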

source


# PromptingTools.set_preferences!Method.
julia
set_preferences!(pairs::Pair{String, <:Any}...)

Set preferences for PromptingTools. See ?PREFERENCES for more information.

See also: get_preferences

Example

Change your API key and default model:

julia
PromptingTools.set_preferences!("OPENAI_API_KEY" => "key1", "MODEL_CHAT" => "chat1")

source


# PromptingTools.set_properties_strict!Method.
julia
set_properties_strict!(properties::AbstractDict)

Sets strict mode for the properties of a JSON schema.

Changes:

Reference: https://platform.openai.com/docs/guides/structured-outputs/supported-schemas

source


# PromptingTools.streamed_request!Method.
julia
streamed_request!(cb::AbstractStreamCallback, url, headers, input; kwargs...)

End-to-end wrapper for POST streaming requests. It modifies the callback object in place (cb.chunks) with the results of the request as they stream in, and returns the response object. At the end, the response body is assembled from the received chunks and written into resp.body.

Returns the response object.

Arguments
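
Example

You would rarely call streamed_request! directly; it is driven by the higher-level ai* functions when you pass a streaming callback. A hedged sketch (it assumes the StreamCallback constructor and the streamcallback keyword documented elsewhere in PromptingTools, plus a configured OpenAI API key):

julia
cb = PT.StreamCallback()   # assumed default construction; collects the streamed chunks in cb.chunks\nmsg = aigenerate("Count from 1 to 5."; streamcallback = cb)\nlength(cb.chunks)          # number of streamed chunks received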

source


', 21)), _cache[57] || (_cache[57] = createBaseVNode("div", { style: { "border-width": "1px", "border-style": "solid", "border-color": "black", "padding": "1em", "border-radius": "25px" } }, [ createBaseVNode("a", { id: "PromptingTools.tool_call_signature-Tuple{Union{Method, Type}}", @@ -503,7 +503,7 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { ]), createTextVNode("\n"), createBaseVNode("span", { class: "line" }, [ - createBaseVNode("span", null, "[source](https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/extraction.jl#L341-L424)") + createBaseVNode("span", null, "[source](https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/extraction.jl#L341-L424)") ]), createTextVNode("\n"), createBaseVNode("span", { class: "line" }, [ @@ -713,13 +713,13 @@ function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { ]), createBaseVNode("p", null, [ createBaseVNode("a", { - href: "https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/extraction.jl#L505-L538", + href: "https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/extraction.jl#L505-L538", target: "_blank", rel: "noreferrer" }, "source") ]) ], -1)), - _cache[58] || (_cache[58] = createStaticVNode('
', 15)) + _cache[58] || (_cache[58] = createStaticVNode('
# PromptingTools.unique_permutationMethod.
julia
unique_permutation(inputs::AbstractVector)

Returns indices of unique items in a vector inputs. Access the unique values as inputs[unique_permutation(inputs)].
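
Example (a quick illustrative sketch; the input vector is arbitrary):

julia
inputs = ["a", "b", "a", "c", "b"]\nidxs = unique_permutation(inputs)\ninputs[idxs] == unique(inputs)  # should hold, per the docstring above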

source


# PromptingTools.unwrapMethod.

Unwraps the tracer message or tracer-like object, returning the original object.

source


# PromptingTools.update_field_descriptions!Method.
julia
update_field_descriptions!(\n    parameters::Dict{String, <:Any}, descriptions::Dict{Symbol, <:AbstractString};\n    max_description_length::Int = 200)

Update the given JSON schema with descriptions from the descriptions dictionary. This function modifies the schema in-place, adding a "description" field to each property that has a corresponding entry in the descriptions dictionary.

Note: It modifies the schema in place. Only the top-level "properties" are updated!

Returns: The modified schema dictionary.

Arguments

Examples

julia
    parameters = Dict{String, Any}(\n        "properties" => Dict{String, Any}(\n            "location" => Dict{String, Any}("type" => "string"),\n            "condition" => Dict{String, Any}("type" => "string"),\n            "temperature" => Dict{String, Any}("type" => "number")\n        ),\n        "required" => ["location", "temperature", "condition"],\n        "type" => "object"\n    )\n    descriptions = Dict{Symbol, String}(\n        :temperature => "Temperature in degrees Fahrenheit",\n        :condition => "Current weather condition (e.g., sunny, rainy, cloudy)"\n    )\n    update_field_descriptions!(parameters, descriptions)

source


# PromptingTools.wrap_stringFunction.
julia
wrap_string(str::String,\n    text_width::Int = 20;\n    newline::Union{AbstractString, AbstractChar} = '\n')

Breaks a string into lines of a given text_width. Optionally, you can specify the newline character or string to use.

Example:

julia
wrap_string("Certainly, here's a function in Julia that will wrap a string according to the specifications:", 10) |> print

source


# PromptingTools.@aai_strMacro.
julia
aai"user_prompt"[model_alias] -> AIMessage

Asynchronous version of @ai_str macro, which will log the result once it's ready.

See also aai!"" if you want an asynchronous reply to the provided message / continue the conversation.

Example

Send an asynchronous request to GPT-4, so we don't have to wait for the response. This is very practical with slow models, as you can keep working in the meantime.

julia
**...with some delay...**\n\n**[ Info: Tokens: 29 @ Cost: 0.0011\n in 2.7 seconds**\n\n**[ Info: AIMessage> Hello! How can I assist you today?**

source


# PromptingTools.@ai!_strMacro.
julia
ai!"user_prompt"[model_alias] -> AIMessage

The ai!"" string macro is used to continue a previous conversation with the AI model.

It appends the new user prompt to the last conversation in the tracked history (in PromptingTools.CONV_HISTORY) and generates a response based on the entire conversation context. If you want to see the previous conversation, you can access it via PromptingTools.CONV_HISTORY, which keeps at most the last PromptingTools.MAX_HISTORY_LENGTH conversations.

Arguments

Returns

AIMessage corresponding to the new user prompt, considering the entire conversation history.

Example

To continue a conversation:

julia
# start conversation as normal\nai"Say hi." \n\n# ... wait for reply and then react to it:\n\n# continue the conversation (notice that you can change the model, eg, to more powerful one for better answer)\nai!"What do you think about that?"gpt4t\n# AIMessage("Considering our previous discussion, I think that...")

Usage Notes

Important

Ensure that the conversation history is not too long to maintain relevancy and coherence in the AI's responses. The history length is managed by MAX_HISTORY_LENGTH.

source


# PromptingTools.@ai_strMacro.
julia
ai"user_prompt"[model_alias] -> AIMessage

The ai"" string macro generates an AI response to a given prompt by using aigenerate under the hood.

See also ai!"" if you want to reply to the provided message / continue the conversation.

Arguments

Returns

AIMessage corresponding to the input prompt.

Example

julia
result = ai"Hello, how are you?"\n# AIMessage("Hello! I'm an AI assistant, so I don't have feelings, but I'm here to help you. How can I assist you today?")

If you want to interpolate some variables or additional context, simply use string interpolation:

julia
a=1\nresult = ai"What is `$a+$a`?"\n# AIMessage("The sum of `1+1` is `2`.")

If you want to use a different model, eg, GPT-4, you can provide its alias as a flag:

julia
result = ai"What is `1.23 * 100 + 1`?"gpt4t\n# AIMessage("The answer is 124.")

source


# PromptingTools.@timeoutMacro.
julia
@timeout(seconds, expr_to_run, expr_when_fails)

Simple macro to run an expression with a timeout of seconds. If the expr_to_run fails to finish in seconds seconds, expr_when_fails is returned.

Example

julia
x = @timeout 1 begin\n    sleep(1.1)\n    println("done")\n    1\nend "failed"
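
Here the body needs roughly 1.1 seconds but the budget is 1 second, so the fallback value should be returned:

julia
x == "failed"  # expected: true, because sleep(1.1) exceeds the 1-second budget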

source


', 15)) ]); } const reference = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render]]); diff --git a/previews/PR218/assets/reference_agenttools.md.D536xaTJ.js b/previews/PR218/assets/reference_agenttools.md.CE_B_eQV.js similarity index 97% rename from previews/PR218/assets/reference_agenttools.md.D536xaTJ.js rename to previews/PR218/assets/reference_agenttools.md.CE_B_eQV.js index 13b7f9e45..80bc98080 100644 --- a/previews/PR218/assets/reference_agenttools.md.D536xaTJ.js +++ b/previews/PR218/assets/reference_agenttools.md.CE_B_eQV.js @@ -3,7 +3,7 @@ const __pageData = JSON.parse('{"title":"Reference for AgentTools","description" const _sfc_main = { name: "reference_agenttools.md" }; function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { return openBlock(), createElementBlock("div", null, _cache[0] || (_cache[0] = [ - createStaticVNode('

', 76) + createStaticVNode('

Reference for AgentTools

# PromptingTools.Experimental.AgentToolsModule.
julia
AgentTools

Provides Agentic functionality providing lazy calls for building pipelines (eg, AIGenerate) and AICodeFixer.

This module is experimental and may change at any time. It is intended to be moved to a separate package in the future.

source


# PromptingTools.Experimental.AgentTools.AICallType.
julia
AICall(func::F, args...; kwargs...) where {F<:Function}\n\nAIGenerate(args...; kwargs...)\nAIEmbed(args...; kwargs...)\nAIExtract(args...; kwargs...)

A lazy call wrapper for AI functions in the PromptingTools module, such as aigenerate.

The AICall struct is designed to facilitate a deferred execution model (lazy evaluation) for AI functions that interact with a Large Language Model (LLM). It stores the necessary information for an AI call and executes the underlying AI function only when supplied with a UserMessage or when the run! method is applied. This approach allows for more flexible and efficient handling of AI function calls, especially in interactive environments.

See also: run!, AICodeFixer

Fields

Example

Initiate an AICall like any ai* function, eg, AIGenerate:

julia
aicall = AICall(aigenerate)\n\n# With arguments and kwargs like ai* functions\n# from `aigenerate(schema, conversation; model="abc", api_kwargs=(; temperature=0.1))`\n# to\naicall = AICall(aigenerate, schema, conversation; model="abc", api_kwargs=(; temperature=0.1))\n\n# Or with a template\naicall = AIGenerate(:JuliaExpertAsk; ask="xyz", model="abc", api_kwargs=(; temperature=0.1))

Trigger the AICall with run! (it returns the updated AICall struct back):

julia
aicall |> run!

You can also use AICall as a functor to trigger the AI call with a UserMessage or simply the text to send:

julia
aicall(UserMessage("Hello, world!")) # Triggers the lazy call\nresult = run!(aicall) # Explicitly runs the AI call

This can be used to "reply" to the previous message / continue the stored conversation.

Notes

source


# PromptingTools.Experimental.AgentTools.AICodeFixerType.
julia
AICodeFixer(aicall::AICall, templates::Vector{<:PT.UserMessage}; num_rounds::Int = 3, feedback_func::Function = aicodefixer_feedback; kwargs...)\nAICodeFixer(aicall::AICall, template::Union{AITemplate, Symbol} = :CodeFixerRCI; kwargs...)

An AIAgent that iteratively evaluates any received Julia code and provides feedback to the AI model if num_rounds>0. AICodeFixer manages the lifecycle of a code fixing session, including tracking conversation history, rounds of interaction, and applying user feedback through a specialized feedback function.

It integrates with lazy AI call structures like AIGenerate.

The operation is "lazy", ie, the agent is only executed when needed, eg, when run! is called.

Fields

Note: Any kwargs provided to run!() will be passed to the underlying AICall.

Example

Let's create an AIGenerate call and then pipe it to AICodeFixer to run a few rounds of code fixing:

julia
# Create an AIGenerate call\nlazy_call = AIGenerate("Write a function to do XYZ...")\n\n# the action starts only when `run!` is called\nresult = lazy_call |> AICodeFixer |> run!\n\n# Access the result of the code fixing session\n# result.call refers to the AIGenerate lazy call above\nconversation = result.call.conversation\nfixed_code = last(conversation) # usually in the last message\n\n# Preview the conversation history\npreview(conversation)

You can change the template used to provide user feedback and the number of rounds via arguments:

julia
# Setup an AIGenerate call\nlazy_call = AIGenerate(aigenerate, "Write code to do XYZ...")\n\n# Custom template and 2 fixing rounds\nresult = AICodeFixer(lazy_call, [PT.UserMessage("Please fix the code.\n\nFeedback: {{feedback}}")]; num_rounds = 2) |> run!\n\n# The result now contains the AI's attempts to fix the code\npreview(result.call.conversation)

Notes

source


# PromptingTools.Experimental.AgentTools.RetryConfigType.
julia
RetryConfig

Configuration for self-fixing the AI calls. It includes the following fields:

Fields
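
Example

A sketch using only the keyword fields that appear in the airetry! examples elsewhere in this reference (other fields may exist; check ?RetryConfig):

julia
config = RetryConfig(; max_retries = 3, retry_delay = 2, n_samples = 2, catch_errors = true)\nout = AIGenerate("say hi!"; config)\nrun!(out)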

source


# PromptingTools.Experimental.AgentTools.SampleNodeType.
julia
SampleNode{T}

A node in the Monte Carlo Tree Search tree.

It's used to hold the data we're trying to optimize/discover (eg, a conversation), the scores from evaluation (wins, visits) and the results of the evaluations upon failure (feedback).

Fields
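
Example

A minimal sketch assembled from the expand!, backpropagate! and print_samples entries in this reference; the data payload is left empty here purely for illustration:

julia
data = PT.AbstractMessage[]\nroot = SampleNode(; data)\nchild = expand!(root, data)                   # add a child node holding the same (empty) data\nbackpropagate!(child; wins = 1, visits = 1)   # record one successful evaluation\nprint_samples(root)                           # pretty-print the tree of samples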

source


# PromptingTools.Experimental.AgentTools.ThompsonSamplingType.
julia
ThompsonSampling <: AbstractScoringMethod

Implements scoring and selection for Thompson Sampling method. See https://en.wikipedia.org/wiki/Thompson_sampling for more details.

source


# PromptingTools.Experimental.AgentTools.UCTType.
julia
UCT <: AbstractScoringMethod

Implements scoring and selection for UCT (Upper Confidence Bound for Trees) sampling method. See https://en.wikipedia.org/wiki/Monte_Carlo_tree_search#Exploration_and_exploitation for more details.

source


# PromptingTools.Experimental.AgentTools.AIClassifyMethod.
julia
AIClassify(args...; kwargs...)

Creates a lazy instance of aiclassify. It is an instance of AICall with aiclassify as the function.

Use exactly the same arguments and keyword arguments as aiclassify (see ?aiclassify for details).
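
Example

For instance (a sketch; it assumes the :JudgeIsItTrue classification template with an it placeholder and a configured API key):

julia
aicall = AIClassify(:JudgeIsItTrue; it = "Water boils at 100C at sea level.")\nrun!(aicall)\nlast_output(aicall)  # expected to be one of the template's choices, eg, "true"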

source


# PromptingTools.Experimental.AgentTools.AIEmbedMethod.
julia
AIEmbed(args...; kwargs...)

Creates a lazy instance of aiembed. It is an instance of AICall with aiembed as the function.

Use exactly the same arguments and keyword arguments as aiembed (see ?aiembed for details).

source


# PromptingTools.Experimental.AgentTools.AIExtractMethod.
julia
AIExtract(args...; kwargs...)

Creates a lazy instance of aiextract. It is an instance of AICall with aiextract as the function.

Use exactly the same arguments and keyword arguments as aiextract (see ?aiextract for details).

source


# PromptingTools.Experimental.AgentTools.AIGenerateMethod.
julia
AIGenerate(args...; kwargs...)

Creates a lazy instance of aigenerate. It is an instance of AICall with aigenerate as the function.

Use exactly the same arguments and keyword arguments as aigenerate (see ?aigenerate for details).
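
Example

For instance (a sketch with an arbitrary prompt; requires a configured API key):

julia
out = AIGenerate("What is the capital of France?"; model = "gpt4t")\nrun!(out)\nlast_output(out)  # the text of the AI reply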

source


# PromptingTools.Experimental.AgentTools.AIScanMethod.
julia
AIScan(args...; kwargs...)

Creates a lazy instance of aiscan. It is an instance of AICall with aiscan as the function.

Use exactly the same arguments and keyword arguments as aiscan (see ?aiscan for details).

source


# PromptingTools.Experimental.AgentTools.add_feedback!Method.
julia
add_feedback!(\n    conversation::AbstractVector{<:PT.AbstractMessage}, sample::SampleNode; feedback_inplace::Bool = false,\n    feedback_template::Symbol = :FeedbackFromEvaluator)

Adds formatted feedback to the conversation based on the sample node feedback (and its ancestors).

Arguments

Example

julia
sample = SampleNode(; data = nothing, feedback = "Feedback X")\nconversation = [PT.UserMessage("I say hi!"), PT.AIMessage(; content = "I say hi!")]\nconversation = AT.add_feedback!(conversation, sample)\nconversation[end].content == "### Feedback from Evaluator\\nFeedback X\\n"

Inplace feedback:

julia
conversation = [PT.UserMessage("I say hi!"), PT.AIMessage(; content = "I say hi!")]\nconversation = AT.add_feedback!(conversation, sample; feedback_inplace = true)\nconversation[end].content == "I say hi!\\n\\n### Feedback from Evaluator\\nFeedback X\\n"

Sample with ancestors with feedback:

julia
sample_p = SampleNode(; data = nothing, feedback = "\\nFeedback X")\nsample = expand!(sample_p, nothing)\nsample.feedback = "\\nFeedback Y"\nconversation = [PT.UserMessage("I say hi!"), PT.AIMessage(; content = "I say hi!")]\nconversation = AT.add_feedback!(conversation, sample)\n\nconversation[end].content == "### Feedback from Evaluator\\n\\nFeedback X\\n–––––\\n\\nFeedback Y\\n"

source


# PromptingTools.Experimental.AgentTools.aicodefixer_feedbackMethod.
julia
aicodefixer_feedback(cb::AICode; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(conversation::AbstractVector{<:PT.AbstractMessage}; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(msg::PT.AIMessage; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(aicall::AICall; max_length::Int = 512) -> NamedTuple(; feedback::String)

Generate feedback for an AI code fixing session based on the AICode block and/or conversation history (that will be used to extract and evaluate a code block). The function is designed to be extensible for different types of feedback and code evaluation outcomes.

The high-level wrapper accepts a conversation and returns new kwargs for the AICall.

Individual feedback functions are dispatched on different subtypes of AbstractCodeOutcome and can be extended/overwritten to provide more detailed feedback.

See also: AIGenerate, AICodeFixer

Arguments

Returns

Example

julia
cb = AICode(msg; skip_unsafe = true, capture_stdout = true)\nnew_kwargs = aicodefixer_feedback(cb)\n\nnew_kwargs = aicodefixer_feedback(msg)\nnew_kwargs = aicodefixer_feedback(conversation)

Notes

This function is part of the AI code fixing system, intended to interact with code in AIMessage and provide feedback on improving it.

The high-level wrapper accepts a conversation and returns new kwargs for the AICall.

It dispatches for the code feedback based on the subtypes of AbstractCodeOutcome below:

You can override the individual methods to customize the feedback.

source


# PromptingTools.Experimental.AgentTools.airetry!Function.
julia
airetry!(\n    f_cond::Function, aicall::AICallBlock, feedback::Union{AbstractString, Function} = "";\n    verbose::Bool = true, throw::Bool = false, evaluate_all::Bool = true, feedback_expensive::Bool = false,\n    max_retries::Union{Nothing, Int} = nothing, retry_delay::Union{Nothing, Int} = nothing)

Evaluates the condition f_cond on the aicall object. If the condition is not met, it will return the best sample to retry from and provide feedback (string or function) to aicall. That's why it's mutating. It will retry at most max_retries times; with throw=true, an error will be thrown if the condition is still not met after max_retries retries.

Note: aicall must be run first via run!(aicall) before calling airetry!.

Function signatures

You can leverage the last_message, last_output, and AICode functions to access the last message, last output and execute code blocks in the conversation, respectively. See examples below.

Good Use Cases

Gotchas

Arguments

Returns

Example

You can use airetry! to catch API errors in run! and auto-retry the call. RetryConfig is how you influence all the subsequent retry behaviours - see ?RetryConfig for more details.

julia
# API failure because of a non-existent model\nout = AIGenerate("say hi!"; config = RetryConfig(; catch_errors = true),\n    model = "NOTEXIST")\nrun!(out) # fails\n\n# we ask to wait 2s between retries and retry 2 times (can be set in `config` in aicall as well)\nairetry!(isvalid, out; retry_delay = 2, max_retries = 2)

If you provide arguments to the aicall, we try to honor them as much as possible in the following calls, eg, setting low verbosity:

julia
out = AIGenerate("say hi!"; config = RetryConfig(; catch_errors = true),\nmodel = "NOTEXIST", verbose=false)\nrun!(out)\n# No info message, you just see `success = false` in the properties of the AICall

Let's show a toy example to demonstrate the runtime checks / guardrails for the model output. We'll play a color guessing game (I'm thinking "yellow"):

julia
# Notice that we ask for two samples (`n_samples=2`) at each attempt (to improve our chances). \n# Both guesses are scored at each time step, and the best one is chosen for the next step.\n# And with OpenAI, we can set `api_kwargs = (;n=2)` to get both samples simultaneously (cheaper and faster)!\nout = AIGenerate(\n    "Guess what color I'm thinking. It could be: blue, red, black, white, yellow. Answer with 1 word only";\n    verbose = false,\n    config = RetryConfig(; n_samples = 2), api_kwargs = (; n = 2))\nrun!(out)\n\n\n## Check that the output is 1 word only, third argument is the feedback that will be provided if the condition fails\n## Notice: functions operate on `aicall` as the only argument. We can use utilities like `last_output` and `last_message` to access the last message and output in the conversation.\nairetry!(x -> length(split(last_output(x), r" |\\.")) == 1, out,\n    "You must answer with 1 word only.")\n\n\n## Let's ensure that the output is in lowercase - simple and short\nairetry!(x -> all(islowercase, last_output(x)), out, "You must answer in lowercase.")\n# [ Info: Condition not met. Retrying...\n\n\n## Let's add final hint - it took us 2 retries\nairetry!(x -> startswith(last_output(x), "y"), out, "It starts with "y"")\n# [ Info: Condition not met. Retrying...\n# [ Info: Condition not met. Retrying...\n\n\n## We end up with the correct answer\nlast_output(out)\n# Output: "yellow"

Let's explore how we got here. We save the various attempts in a "tree" (a SampleNode object). You can access it in out.samples, which is the ROOT of the tree (top level). The currently "active" sample ID is out.active_sample_id -> its data is the same as the conversation field in your AICall.

julia
# Root node:\nout.samples\n# Output: SampleNode(id: 46839, stats: 6/12, length: 2)\n\n# Active sample (our correct answer):\nout.active_sample_id \n# Output: 50086\n\n# Let's obtain the active sample node with this ID  - use getindex notation or function find_node\nout.samples[out.active_sample_id]\n# Output: SampleNode(id: 50086, stats: 1/1, length: 7)\n\n# The SampleNode has two key fields: data and feedback. Data is where the conversation is stored:\nactive_sample = out.samples[out.active_sample_id]\nactive_sample.data == out.conversation # Output: true -> This is the winning guess!

We also get a clear view of the tree structure of all samples with print_samples:

julia
julia> print_samples(out.samples)\nSampleNode(id: 46839, stats: 6/12, score: 0.5, length: 2)\n├─ SampleNode(id: 12940, stats: 5/8, score: 1.41, length: 4)\n│  ├─ SampleNode(id: 34315, stats: 3/4, score: 1.77, length: 6)\n│  │  ├─ SampleNode(id: 20493, stats: 1/1, score: 2.67, length: 7)\n│  │  └─ SampleNode(id: 50086, stats: 1/1, score: 2.67, length: 7)\n│  └─ SampleNode(id: 2733, stats: 1/2, score: 1.94, length: 5)\n└─ SampleNode(id: 48343, stats: 1/4, score: 1.36, length: 4)\n   ├─ SampleNode(id: 30088, stats: 0/1, score: 1.67, length: 5)\n   └─ SampleNode(id: 44816, stats: 0/1, score: 1.67, length: 5)

You can use the id to grab and inspect any of these nodes, eg,

julia
out.samples[2733]\n# Output: SampleNode(id: 2733, stats: 1/2, length: 5)

We can also iterate through all samples and extract whatever information we want with PostOrderDFS or PreOrderDFS (exported from AbstractTrees.jl)

julia
for sample in PostOrderDFS(out.samples)\n    # Data is the universal field for samples, we put `conversation` in there\n    # Last item in data is the last message in coversation\n    msg = sample.data[end]\n    if msg isa PT.AIMessage # skip feedback\n        # get only the message content, ie, the guess\n        println("ID: $(sample.id), Answer: $(msg.content)")\n    end\nend\n\n# ID: 20493, Answer: yellow\n# ID: 50086, Answer: yellow\n# ID: 2733, Answer: red\n# ID: 30088, Answer: blue\n# ID: 44816, Answer: blue

Note: airetry! will attempt to fix the model max_retries times. If you set throw=true, it will throw an ErrorException if the condition is not met after max_retries retries.

Let's define a mini program to guess the number and use airetry! to guide the model to the correct answer:

julia
"""\n    llm_guesser()\n\nMini program to guess the number provided by the user (betwee 1-100).\n"""\nfunction llm_guesser(user_number::Int)\n    @assert 1 <= user_number <= 100\n    prompt = """\nI'm thinking a number between 1-100. Guess which one it is. \nYou must respond only with digits and nothing else. \nYour guess:"""\n    ## 2 samples at a time, max 5 fixing rounds\n    out = AIGenerate(prompt; config = RetryConfig(; n_samples = 2, max_retries = 5),\n        api_kwargs = (; n = 2)) |> run!\n    ## Check the proper output format - must parse to Int, use do-syntax\n    ## We can provide feedback via a function!\n    function feedback_f(aicall)\n        "Output: $(last_output(aicall))\nFeedback: You must respond only with digits!!"\n    end\n    airetry!(out, feedback_f) do aicall\n        !isnothing(tryparse(Int, last_output(aicall)))\n    end\n    ## Give a hint on bounds\n    lower_bound = (user_number ÷ 10) * 10\n    upper_bound = lower_bound + 10\n    airetry!(\n        out, "The number is between or equal to $lower_bound to $upper_bound.") do aicall\n        guess = tryparse(Int, last_output(aicall))\n        lower_bound <= guess <= upper_bound\n    end\n    ## You can make at most 3x guess now -- if there is max_retries in `config.max_retries` left\n    max_retries = out.config.retries + 3\n    function feedback_f2(aicall)\n        guess = tryparse(Int, last_output(aicall))\n        "Your guess of $(guess) is wrong, it's $(abs(guess-user_number)) numbers away."\n    end\n    airetry!(out, feedback_f2; max_retries) do aicall\n        tryparse(Int, last_output(aicall)) == user_number\n    end\n\n    ## Evaluate the best guess\n    @info "Results: Guess: $(last_output(out)) vs User: $user_number (Number of calls made: $(out.config.calls))"\n    return out\nend\n\n# Let's play the game\nout = llm_guesser(33)\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Results: Guess: 33 vs User: 33 (Number of calls made: 10)

Yay! We got it 😃

Now, we could explore different samples (eg, print_samples(out.samples)) or see what the model guessed at each step:

julia
print_samples(out.samples)\n## SampleNode(id: 57694, stats: 6/14, score: 0.43, length: 2)\n## ├─ SampleNode(id: 35603, stats: 5/10, score: 1.23, length: 4)\n## │  ├─ SampleNode(id: 55394, stats: 1/4, score: 1.32, length: 6)\n## │  │  ├─ SampleNode(id: 20737, stats: 0/1, score: 1.67, length: 7)\n## │  │  └─ SampleNode(id: 52910, stats: 0/1, score: 1.67, length: 7)\n## │  └─ SampleNode(id: 43094, stats: 3/4, score: 1.82, length: 6)\n## │     ├─ SampleNode(id: 14966, stats: 1/1, score: 2.67, length: 7)\n## │     └─ SampleNode(id: 32991, stats: 1/1, score: 2.67, length: 7)\n## └─ SampleNode(id: 20506, stats: 1/4, score: 1.4, length: 4)\n##    ├─ SampleNode(id: 37581, stats: 0/1, score: 1.67, length: 5)\n##    └─ SampleNode(id: 46632, stats: 0/1, score: 1.67, length: 5)\n\n# Lastly, let's check all the guesses AI made across all samples. \n# Our winning guess was ID 32991 (`out.active_sample_id`)\n\nfor sample in PostOrderDFS(out.samples)\n    [println("ID: $(sample.id), Guess: $(msg.content)")\n     for msg in sample.data if msg isa PT.AIMessage]\nend\n## ID: 20737, Guess: 50\n## ID: 20737, Guess: 35\n## ID: 20737, Guess: 37\n## ID: 52910, Guess: 50\n## ID: 52910, Guess: 35\n## ID: 52910, Guess: 32\n## ID: 14966, Guess: 50\n## ID: 14966, Guess: 35\n## ID: 14966, Guess: 33\n## ID: 32991, Guess: 50\n## ID: 32991, Guess: 35\n## ID: 32991, Guess: 33\n## etc...

Note that if there are multiple "branches", the model will see only its own feedback and that of its ancestors, not that of the other "branches". If you want to provide ALL feedback, set RetryConfig(; n_samples=1) to remove any "branching". The fixing will then be done sequentially in one conversation and the model will see all feedback (less powerful if the model falls into a bad state). Alternatively, you can tweak the feedback function.

See Also

References: airetry is inspired by the Language Agent Tree Search paper and by the DSPy Assertions paper.

source


# PromptingTools.Experimental.AgentTools.backpropagate!Method.

Provides scores for a given node (and all its ancestors) based on the evaluation (wins, visits).
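
A minimal usage sketch (the wins and visits keywords follow the select_best example later in this reference; treat the exact behaviour as an assumption):

julia
# Sketch: record one evaluation (1 win out of 1 visit) on a leaf node.\n# backpropagate! then updates the leaf and every ancestor up to the root.\nroot = SampleNode(; data = PT.AbstractMessage[])\nchild = expand!(root, PT.AbstractMessage[])\nbackpropagate!(child; wins = 1, visits = 1)\n# root and child now both show stats 1/1 when printed with print_samples(root)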

source


# PromptingTools.Experimental.AgentTools.beta_sampleMethod.
julia
beta_sample(α::Real, β::Real)

Approximates a sample from the Beta distribution by generating two independent Gamma distributed samples and using their ratio.
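
Conceptually, this is the classic Gamma-ratio construction: if X ~ Gamma(α, 1) and Y ~ Gamma(β, 1) are independent, then X / (X + Y) ~ Beta(α, β). A hedged sketch of the idea using gamma_sample from this module (the exact implementation may differ):

julia
# Sketch only: one Beta(α, β) draw from two independent Gamma(., 1) draws.\nfunction beta_sample_sketch(α::Real, β::Real)\n    x = gamma_sample(α, 1.0)\n    y = gamma_sample(β, 1.0)\n    return x / (x + y)\nend\n\nbeta_sample_sketch(2.0, 5.0) # a draw in (0, 1) with mean ≈ 2/7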

source


# PromptingTools.Experimental.AgentTools.collect_all_feedbackMethod.

Collects all feedback from the node and its ancestors (parents). Returns a string separated by separator.
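
A small illustrative sketch, assuming the function accepts a SampleNode (the exact signature and separator keyword are assumptions based on the summary above):

julia
# Assumed usage: feedback accumulates along a branch of the sample tree.\nparent = SampleNode(; data = nothing, feedback = "Feedback A")\nchild = expand!(parent, nothing)\nchild.feedback = "Feedback B"\nAT.collect_all_feedback(child) # expected to contain both "Feedback A" and "Feedback B"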

source


# PromptingTools.Experimental.AgentTools.error_feedbackMethod.
julia
error_feedback(e::Any; max_length::Int = 512)

Set of specialized methods to provide feedback on different types of errors (e).

source


# PromptingTools.Experimental.AgentTools.evaluate_condition!Function.
julia
evaluate_condition!(f_cond::Function, aicall::AICallBlock,\n    feedback::Union{AbstractString, Function} = "";\n    evaluate_all::Bool = true, feedback_expensive::Bool = false)

Evaluates the condition f_cond (must return Bool) on the aicall object. If the condition is not met, it will return the best sample to retry from and provide feedback.

Mutating, as the results are saved in aicall.samples.

If evaluate_all is true, it will evaluate all the "successful" samples in the aicall object. Otherwise, it will only evaluate the active sample.

For f_cond and feedback functions, you can use the last_message and last_output utilities to access the last message and last output in the conversation, respectively.

Arguments

Returns

Example

julia
# Mimic AIGenerate run!\naicall = AIGenerate("Say hi!"; config = RetryConfig(; n_samples = 2))\nsample = expand!(aicall.samples, aicall.conversation; success = true)\naicall.active_sample_id = sample.id\n\n# Return whether it passed and node to take the next action from\ncond, node = AT.evaluate_condition!(x -> occursin("hi", last_output(x)), aicall)\n\n# Checks:\ncond == true\nnode == sample\nnode.wins == 1

With feedback:

julia
# Mimic AIGenerate run with feedback\naicall = AIGenerate(:BlankSystemUser; system = "a", user = "b")\nsample = expand!(aicall.samples, aicall.conversation; success = true)\naicall.active_sample_id = sample.id\n\n# Evaluate\ncond, node = AT.evaluate_condition!(\n    x -> occursin("NOTFOUND", last_output(x)), aicall, "Feedback X")\ncond == false # fail\nsample == node # same node (no other choice)\nnode.wins == 0\nnode.feedback == " Feedback X"

source


# PromptingTools.Experimental.AgentTools.expand!Method.

Expands the tree with a new node from parent using the given data and success.

source


# PromptingTools.Experimental.AgentTools.extract_configMethod.

Extracts config::RetryConfig from kwargs and returns the rest of the kwargs.

source


# PromptingTools.Experimental.AgentTools.find_nodeMethod.

Finds a node with a given id in the tree starting from node.
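
A brief sketch, assuming the (root, id) argument order suggested by the summary; as noted in the airetry! example above, getindex notation is an alternative:

julia
# Search the whole tree (starting from the root) for a node with the given id;\n# equivalent to the getindex form out.samples[out.active_sample_id].\nnode = find_node(out.samples, out.active_sample_id)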

source


# PromptingTools.Experimental.AgentTools.gamma_sampleMethod.
julia
gamma_sample(α::Real, θ::Real)

Approximates a sample from the Gamma distribution using the Marsaglia and Tsang method.
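
For reference, the Marsaglia and Tsang squeeze method for shape α >= 1 works roughly as follows; this is a generic sketch of the published algorithm, not necessarily the exact code used here:

julia
# Generic Marsaglia-Tsang sketch for Gamma(α, θ) with α >= 1\n# (for α < 1, one typically draws Gamma(α + 1, θ) and multiplies by rand()^(1/α)).\nfunction gamma_sample_sketch(α::Real, θ::Real)\n    d = α - 1 / 3\n    c = 1 / sqrt(9 * d)\n    while true\n        x = randn()\n        v = (1 + c * x)^3\n        v <= 0 && continue\n        u = rand()\n        if log(u) < 0.5 * x^2 + d - d * v + d * log(v)\n            return d * v * θ\n        end\n    end\nend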

source


# PromptingTools.Experimental.AgentTools.print_samplesMethod.

Pretty prints the samples tree starting from node. Usually, node is the root of the tree. Example: print_samples(aicall.samples).

source


# PromptingTools.Experimental.AgentTools.remove_used_kwargsMethod.

Removes the kwargs that have already been used in the conversation. Returns NamedTuple.

source


# PromptingTools.Experimental.AgentTools.reset_success!Function.

Sets the success field of all nodes in the tree to success value.

source


# PromptingTools.Experimental.AgentTools.run!Method.
julia
run!(codefixer::AICodeFixer; verbose::Int = 1, max_conversation_length::Int = 32000, run_kwargs...)

Executes the code fixing process encapsulated by the AICodeFixer instance. This method iteratively refines and fixes code by running the AI call in a loop for a specified number of rounds, using feedback from the code evaluation (aicodefixer_feedback) to improve the outcome in each iteration.

Arguments

Returns

Usage

julia
aicall = AICall(aigenerate, schema=mySchema, conversation=myConversation)\ncodefixer = AICodeFixer(aicall, myTemplates; num_rounds=5)\nresult = run!(codefixer, verbose=2)

Notes

source


# PromptingTools.Experimental.AgentTools.run!Method.
julia
run!(aicall::AICallBlock; verbose::Int = 1, catch_errors::Bool = false, return_all::Bool = true, kwargs...)

Executes the AI call wrapped by an AICallBlock instance. This method triggers the actual communication with the AI model and processes the response based on the provided conversation context and parameters.

Note: Currently return_all must always be set to true.

Arguments

Returns

Example

julia
aicall = AICall(aigenerate)\nrun!(aicall)

Alternatively, you can trigger the run! call by using the AICall as a functor and calling it with a string or a UserMessage:

julia
aicall = AICall(aigenerate)\naicall("Say hi!")

Notes

source


# PromptingTools.Experimental.AgentTools.scoreMethod.

Scores a node using the ThompsonSampling method, similar to Bandit algorithms.

source


# PromptingTools.Experimental.AgentTools.scoreMethod.

Scores a node using the UCT (Upper Confidence Bound for Trees) method.
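
The standard UCT score combines a node's empirical win rate with an exploration bonus derived from its parent's visit count; a hedged sketch (the exploration constant and edge-case handling are assumptions, not necessarily the package's exact choices):

julia
# Generic UCT sketch: exploitation (win rate) + exploration bonus.\nfunction uct_score_sketch(wins::Int, visits::Int, parent_visits::Int; exploration::Float64 = 2.0)\n    visits == 0 && return Inf # always try unvisited nodes first\n    return wins / visits + exploration * sqrt(log(parent_visits) / visits)\nend\n\nuct_score_sketch(1, 1, 3) # ≈ 1.0 + 2.0 * sqrt(log(3)) ≈ 3.1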

source


# PromptingTools.Experimental.AgentTools.select_bestFunction.
julia
select_best(node::SampleNode, scoring::AbstractScoringMethod = UCT();\n    ordering::Symbol = :PostOrderDFS)

Selects the best node from the tree using the given scoring (UCT or ThompsonSampling). Defaults to UCT. Thompson Sampling is more random with small samples, while UCT stabilizes much quicker thanks to looking at parent nodes as well.

Ordering can be either :PreOrderDFS or :PostOrderDFS. Defaults to :PostOrderDFS, which favors the leaves (end points of the tree).

Example

Compare the different scoring methods:

julia
# Set up mock samples and scores\ndata = PT.AbstractMessage[]\nroot = SampleNode(; data)\nchild1 = expand!(root, data)\nbackpropagate!(child1; wins = 1, visits = 1)\nchild2 = expand!(root, data)\nbackpropagate!(child2; wins = 0, visits = 1)\nchild11 = expand!(child1, data)\nbackpropagate!(child11; wins = 1, visits = 1)\n\n# Select with UCT\nn = select_best(root, UCT())\nSampleNode(id: 29826, stats: 1/1, length: 0)\n\n# Show the tree:\nprint_samples(root; scoring = UCT())\n## SampleNode(id: 13184, stats: 2/3, score: 0.67, length: 0)\n## ├─ SampleNode(id: 26078, stats: 2/2, score: 2.05, length: 0)\n## │  └─ SampleNode(id: 29826, stats: 1/1, score: 2.18, length: 0)\n## └─ SampleNode(id: 39931, stats: 0/1, score: 1.48, length: 0)\n\n# Select with ThompsonSampling - much more random with small samples\nn = select_best(root, ThompsonSampling())\nSampleNode(id: 26078, stats: 2/2, length: 0)\n\n# Show the tree (run it a few times and see how the scores jump around):\nprint_samples(root; scoring = ThompsonSampling())\n## SampleNode(id: 13184, stats: 2/3, score: 0.6, length: 0)\n## ├─ SampleNode(id: 26078, stats: 2/2, score: 0.93, length: 0)\n## │  └─ SampleNode(id: 29826, stats: 1/1, score: 0.22, length: 0)\n## └─ SampleNode(id: 39931, stats: 0/1, score: 0.84, length: 0)

source


# PromptingTools.Experimental.AgentTools.split_multi_samplesMethod.

If the conversation has multiple AIMessage samples, split them into separate conversations with the common past.

source


# PromptingTools.Experimental.AgentTools.truncate_conversationMethod.
julia
truncate_conversation(conversation::AbstractVector{<:PT.AbstractMessage};\n    max_conversation_length::Int = 32000)

Truncates a given conversation to a max_conversation_length characters by removing messages "in the middle". It tries to retain the original system+user message and also the most recent messages.

Practically, if a conversation is too long, it will start by removing the most recent message EXCEPT for the last two (assumed to be the last AIMessage with the code and the UserMessage with the feedback).

Arguments

max_conversation_length is in characters; assuming roughly 2-3 characters per LLM token, 32000 characters should correspond to a 16K-token context window.
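
A small usage sketch (the conversation variable is assumed to be a vector of PT messages, as in the signature above):

julia
# Trim a long conversation to stay under the character budget while keeping\n# the initial system+user messages and the most recent turns.\nshort_conv = truncate_conversation(conversation; max_conversation_length = 32_000)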

source


# PromptingTools.Experimental.AgentTools.unwrap_aicall_argsMethod.

Unwraps the arguments for AICall and returns the schema and conversation (if provided). Expands any provided AITemplate.

source


# PromptingTools.last_messageMethod.

Helpful accessor for AICall blocks. Returns the last message in the conversation.

source


# PromptingTools.last_outputMethod.

Helpful accessor for AICall blocks. Returns the last output in the conversation (eg, the string/data in the last message).
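
Together with last_message, this gives quick access to the final turn of a lazy call; a brief sketch using the out object from the airetry! examples above:

julia
msg = last_message(out)  # the final message object (an AIMessage after a successful run)\nans = last_output(out)   # just its content, eg, "yellow" in the color-guessing game above\nans == msg.content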

source


', 76) ])); } const reference_agenttools = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render]]); diff --git a/previews/PR218/assets/reference_agenttools.md.D536xaTJ.lean.js b/previews/PR218/assets/reference_agenttools.md.CE_B_eQV.lean.js similarity index 97% rename from previews/PR218/assets/reference_agenttools.md.D536xaTJ.lean.js rename to previews/PR218/assets/reference_agenttools.md.CE_B_eQV.lean.js index 13b7f9e45..80bc98080 100644 --- a/previews/PR218/assets/reference_agenttools.md.D536xaTJ.lean.js +++ b/previews/PR218/assets/reference_agenttools.md.CE_B_eQV.lean.js @@ -3,7 +3,7 @@ const __pageData = JSON.parse('{"title":"Reference for AgentTools","description" const _sfc_main = { name: "reference_agenttools.md" }; function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { return openBlock(), createElementBlock("div", null, _cache[0] || (_cache[0] = [ - createStaticVNode('

Reference for AgentTools

# PromptingTools.Experimental.AgentToolsModule.
julia
AgentTools

Provides agentic functionality: lazy calls for building pipelines (eg, AIGenerate) and AICodeFixer.

This module is experimental and may change at any time. It is intended to be moved to a separate package in the future.

source


# PromptingTools.Experimental.AgentTools.AICallType.
julia
AICall(func::F, args...; kwargs...) where {F<:Function}\n\nAIGenerate(args...; kwargs...)\nAIEmbed(args...; kwargs...)\nAIExtract(args...; kwargs...)

A lazy call wrapper for AI functions in the PromptingTools module, such as aigenerate.

The AICall struct is designed to facilitate a deferred execution model (lazy evaluation) for AI functions that interact with a Large Language Model (LLM). It stores the necessary information for an AI call and executes the underlying AI function only when supplied with a UserMessage or when the run! method is applied. This approach allows for more flexible and efficient handling of AI function calls, especially in interactive environments.

See also: run!, AICodeFixer

Fields

Example

Initiate an AICall like any ai* function, eg, AIGenerate:

julia
aicall = AICall(aigenerate)\n\n# With arguments and kwargs like ai* functions\n# from `aigenerate(schema, conversation; model="abc", api_kwargs=(; temperature=0.1))`\n# to\naicall = AICall(aigenerate, schema, conversation; model="abc", api_kwargs=(; temperature=0.1))\n\n# Or with a template\naicall = AIGenerate(:JuliaExpertAsk; ask="xyz", model="abc", api_kwargs=(; temperature=0.1))

Trigger the AICall with run! (it returns the updated AICall struct back):

julia
aicall |> run!

You can also use AICall as a functor to trigger the AI call with a UserMessage or simply the text to send:

julia
aicall(UserMessage("Hello, world!")) # Triggers the lazy call\nresult = run!(aicall) # Explicitly runs the AI call

This can be used to "reply" to a previous message / continue the stored conversation.

Notes

source


# PromptingTools.Experimental.AgentTools.AICodeFixerType.
julia
AICodeFixer(aicall::AICall, templates::Vector{<:PT.UserMessage}; num_rounds::Int = 3, feedback_func::Function = aicodefixer_feedback; kwargs...)\nAICodeFixer(aicall::AICall, template::Union{AITemplate, Symbol} = :CodeFixerRCI; kwargs...)

An AIAgent that iteratively evaluates any received Julia code and provides feedback back to the AI model if num_rounds>0. AICodeFixer manages the lifecycle of a code fixing session, including tracking conversation history, rounds of interaction, and applying user feedback through a specialized feedback function.

It integrates with lazy AI call structures like AIGenerate.

The operation is "lazy", ie, the agent is only executed when needed, eg, when run! is called.

Fields

Note: Any kwargs provided to run!() will be passed to the underlying AICall.

Example

Let's create an AIGenerate call and then pipe it to AICodeFixer to run a few rounds of the code fixing:

julia
# Create an AIGenerate call\nlazy_call = AIGenerate("Write a function to do XYZ...")\n\n# the action starts only when `run!` is called\nresult = lazy_call |> AICodeFixer |> run!\n\n# Access the result of the code fixing session\n# result.call refers to the AIGenerate lazy call above\nconversation = result.call.conversation\nfixed_code = last(conversation) # usually in the last message\n\n# Preview the conversation history\npreview(conversation)

You can change the template used to provide user feedback and the number of rounds via arguments:

julia
# Setup an AIGenerate call\nlazy_call = AIGenerate(aigenerate, "Write code to do XYZ...")\n\n# Custom template and 2 fixing rounds\nresult = AICodeFixer(lazy_call, [PT.UserMessage("Please fix the code.\n\nFeedback: {{feedback}}")]; num_rounds = 2) |> run!\n\n# The result now contains the AI's attempts to fix the code\npreview(result.call.conversation)

Notes

source


# PromptingTools.Experimental.AgentTools.RetryConfigType.
julia
RetryConfig

Configuration for self-fixing the AI calls. It includes the following fields:

Fields

source


# PromptingTools.Experimental.AgentTools.SampleNodeType.
julia
SampleNode{T}

A node in the Monte Carlo Tree Search tree.

It's used to hold the data we're trying to optimize/discover (eg, a conversation), the scores from evaluation (wins, visits) and the results of the evaluations upon failure (feedback).
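
A tiny sketch of how a tree is grown and scored, mirroring the select_best example later in this reference (only functions shown elsewhere on this page are used):

julia
data = PT.AbstractMessage[]\nroot = SampleNode(; data)                    # root of the tree\nchild = expand!(root, data)                  # add one child holding the same (empty) conversation\nbackpropagate!(child; wins = 1, visits = 1)  # record one successful evaluation\nprint_samples(root)                          # pretty-print the tree with stats and scores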

Fields

source


# PromptingTools.Experimental.AgentTools.ThompsonSamplingType.
julia
ThompsonSampling <: AbstractScoringMethod

Implements scoring and selection for Thompson Sampling method. See https://en.wikipedia.org/wiki/Thompson_sampling for more details.
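
Thompson Sampling scores a node by drawing from a Beta posterior over its win rate, so nodes with little evidence receive noisy (exploratory) scores. A hedged sketch using beta_sample from this module (the prior pseudo-counts are an assumption):

julia
# Sketch: score = draw from Beta(prior_wins + wins, prior_losses + losses).\nfunction thompson_score_sketch(wins::Int, visits::Int; α0::Real = 1.0, β0::Real = 1.0)\n    losses = visits - wins\n    return beta_sample(α0 + wins, β0 + losses)\nend\n\nthompson_score_sketch(2, 3) # random draw, centered near 3/5 with these priors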

source


# PromptingTools.Experimental.AgentTools.UCTType.
julia
UCT <: AbstractScoringMethod

Implements scoring and selection for UCT (Upper Confidence Bound for Trees) sampling method. See https://en.wikipedia.org/wiki/Monte_Carlo_tree_search#Exploration_and_exploitation for more details.

source


# PromptingTools.Experimental.AgentTools.AIClassifyMethod.
julia
AIClassify(args...; kwargs...)

Creates a lazy instance of aiclassify. It is an instance of AICall with aiclassify as the function.

Use exactly the same arguments and keyword arguments as aiclassify (see ?aiclassify for details).

source


# PromptingTools.Experimental.AgentTools.AIEmbedMethod.
julia
AIEmbed(args...; kwargs...)

Creates a lazy instance of aiembed. It is an instance of AICall with aiembed as the function.

Use exactly the same arguments and keyword arguments as aiembed (see ?aiembed for details).

source


# PromptingTools.Experimental.AgentTools.AIExtractMethod.
julia
AIExtract(args...; kwargs...)

Creates a lazy instance of aiextract. It is an instance of AICall with aiextract as the function.

Use exactly the same arguments and keyword arguments as aiextract (see ?aiextract for details).

source


# PromptingTools.Experimental.AgentTools.AIGenerateMethod.
julia
AIGenerate(args...; kwargs...)

Creates a lazy instance of aigenerate. It is an instance of AICall with aigenerate as the function.

Use exactly the same arguments and keyword arguments as aigenerate (see ?aigenerate for details).

source


# PromptingTools.Experimental.AgentTools.AIScanMethod.
julia
AIScan(args...; kwargs...)

Creates a lazy instance of aiscan. It is an instance of AICall with aiscan as the function.

Use exactly the same arguments and keyword arguments as aiscan (see ?aiscan for details).

source


# PromptingTools.Experimental.AgentTools.add_feedback!Method.
julia
add_feedback!(\n    conversation::AbstractVector{<:PT.AbstractMessage}, sample::SampleNode; feedback_inplace::Bool = false,\n    feedback_template::Symbol = :FeedbackFromEvaluator)

Adds formatted feedback to the conversation based on the sample node feedback (and its ancestors).

Arguments

Example

julia
sample = SampleNode(; data = nothing, feedback = "Feedback X")\nconversation = [PT.UserMessage("I say hi!"), PT.AIMessage(; content = "I say hi!")]\nconversation = AT.add_feedback!(conversation, sample)\nconversation[end].content == "### Feedback from Evaluator\\nFeedback X\\n"

Inplace feedback:

julia
conversation = [PT.UserMessage("I say hi!"), PT.AIMessage(; content = "I say hi!")]\nconversation = AT.add_feedback!(conversation, sample; feedback_inplace = true)\nconversation[end].content == "I say hi!\\n\\n### Feedback from Evaluator\\nFeedback X\\n"

Sample with ancestors with feedback:

julia
sample_p = SampleNode(; data = nothing, feedback = "\\nFeedback X")\nsample = expand!(sample_p, nothing)\nsample.feedback = "\\nFeedback Y"\nconversation = [PT.UserMessage("I say hi!"), PT.AIMessage(; content = "I say hi!")]\nconversation = AT.add_feedback!(conversation, sample)\n\nconversation[end].content == "### Feedback from Evaluator\\n\\nFeedback X\\n–––––\\n\\nFeedback Y\\n"

source


# PromptingTools.Experimental.AgentTools.aicodefixer_feedbackMethod.
julia
aicodefixer_feedback(cb::AICode; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(conversation::AbstractVector{<:PT.AbstractMessage}; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(msg::PT.AIMessage; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(aicall::AICall; max_length::Int = 512) -> NamedTuple(; feedback::String)

Generate feedback for an AI code fixing session based on the AICode block and/or the conversation history (that will be used to extract and evaluate a code block). The function is designed to be extensible for different types of feedback and code evaluation outcomes.

The highlevel wrapper accepts a conversation and returns new kwargs for the AICall.

Individual feedback functions are dispatched on different subtypes of AbstractCodeOutcome and can be extended/overwritten to provide more detailed feedback.

See also: AIGenerate, AICodeFixer

Arguments

Returns

Example

julia
cb = AICode(msg; skip_unsafe = true, capture_stdout = true)\nnew_kwargs = aicodefixer_feedback(cb)\n\nnew_kwargs = aicodefixer_feedback(msg)\nnew_kwargs = aicodefixer_feedback(conversation)

Notes

This function is part of the AI code fixing system, intended to interact with code in AIMessage and provide feedback on improving it.

The highlevel wrapper accepts a conversation and returns new kwargs for the AICall.

It dispatches for the code feedback based on the subtypes of AbstractCodeOutcome below:

You can override the individual methods to customize the feedback.

source


# PromptingTools.Experimental.AgentTools.airetry!Function.
julia
airetry!(\n    f_cond::Function, aicall::AICallBlock, feedback::Union{AbstractString, Function} = "";\n    verbose::Bool = true, throw::Bool = false, evaluate_all::Bool = true, feedback_expensive::Bool = false,\n    max_retries::Union{Nothing, Int} = nothing, retry_delay::Union{Nothing, Int} = nothing)

Evaluates the condition f_cond on the aicall object. If the condition is not met, it will return the best sample to retry from and provide feedback (string or function) to aicall. That's why it's mutating. It will retry at most max_retries times; with throw=true, an error is thrown if the condition is still not met after max_retries retries.

Note: aicall must be run first via run!(aicall) before calling airetry!.

Function signatures

You can leverage the last_message, last_output, and AICode functions to access the last message, last output and execute code blocks in the conversation, respectively. See examples below.

Good Use Cases

Gotchas

Arguments

Returns

Example

You can use airetry! to catch API errors in run! and auto-retry the call. RetryConfig is how you influence all the subsequent retry behaviours - see ?RetryConfig for more details.

julia
# API failure because of a non-existent model\nout = AIGenerate("say hi!"; config = RetryConfig(; catch_errors = true),\n    model = "NOTEXIST")\nrun!(out) # fails\n\n# we ask to wait 2s between retries and retry 2 times (can be set in `config` in aicall as well)\nairetry!(isvalid, out; retry_delay = 2, max_retries = 2)

If you provide arguments to the aicall, we try to honor them as much as possible in the following calls, eg, setting low verbosity:

julia
out = AIGenerate("say hi!"; config = RetryConfig(; catch_errors = true),\nmodel = "NOTEXIST", verbose=false)\nrun!(out)\n# No info message, you just see `success = false` in the properties of the AICall

Let's show a toy example to demonstrate the runtime checks / guardrails for the model output. We'll play a color guessing game (I'm thinking "yellow"):

julia
# Notice that we ask for two samples (`n_samples=2`) at each attempt (to improve our chances). \n# Both guesses are scored at each time step, and the best one is chosen for the next step.\n# And with OpenAI, we can set `api_kwargs = (;n=2)` to get both samples simultaneously (cheaper and faster)!\nout = AIGenerate(\n    "Guess what color I'm thinking. It could be: blue, red, black, white, yellow. Answer with 1 word only";\n    verbose = false,\n    config = RetryConfig(; n_samples = 2), api_kwargs = (; n = 2))\nrun!(out)\n\n\n## Check that the output is 1 word only, third argument is the feedback that will be provided if the condition fails\n## Notice: functions operate on `aicall` as the only argument. We can use utilities like `last_output` and `last_message` to access the last message and output in the conversation.\nairetry!(x -> length(split(last_output(x), r" |\\.")) == 1, out,\n    "You must answer with 1 word only.")\n\n\n## Let's ensure that the output is in lowercase - simple and short\nairetry!(x -> all(islowercase, last_output(x)), out, "You must answer in lowercase.")\n# [ Info: Condition not met. Retrying...\n\n\n## Let's add final hint - it took us 2 retries\nairetry!(x -> startswith(last_output(x), "y"), out, "It starts with "y"")\n# [ Info: Condition not met. Retrying...\n# [ Info: Condition not met. Retrying...\n\n\n## We end up with the correct answer\nlast_output(out)\n# Output: "yellow"

Let's explore how we got here. We save the various attempts in a "tree" (a SampleNode object). You can access it in out.samples, which is the ROOT of the tree (top level). The currently "active" sample ID is out.active_sample_id -> its data is the same as the conversation field in your AICall.

julia
# Root node:\nout.samples\n# Output: SampleNode(id: 46839, stats: 6/12, length: 2)\n\n# Active sample (our correct answer):\nout.active_sample_id \n# Output: 50086\n\n# Let's obtain the active sample node with this ID  - use getindex notation or function find_node\nout.samples[out.active_sample_id]\n# Output: SampleNode(id: 50086, stats: 1/1, length: 7)\n\n# The SampleNode has two key fields: data and feedback. Data is where the conversation is stored:\nactive_sample = out.samples[out.active_sample_id]\nactive_sample.data == out.conversation # Output: true -> This is the winning guess!

We also get a clear view of the tree structure of all samples with print_samples:

julia
julia> print_samples(out.samples)\nSampleNode(id: 46839, stats: 6/12, score: 0.5, length: 2)\n├─ SampleNode(id: 12940, stats: 5/8, score: 1.41, length: 4)\n│  ├─ SampleNode(id: 34315, stats: 3/4, score: 1.77, length: 6)\n│  │  ├─ SampleNode(id: 20493, stats: 1/1, score: 2.67, length: 7)\n│  │  └─ SampleNode(id: 50086, stats: 1/1, score: 2.67, length: 7)\n│  └─ SampleNode(id: 2733, stats: 1/2, score: 1.94, length: 5)\n└─ SampleNode(id: 48343, stats: 1/4, score: 1.36, length: 4)\n   ├─ SampleNode(id: 30088, stats: 0/1, score: 1.67, length: 5)\n   └─ SampleNode(id: 44816, stats: 0/1, score: 1.67, length: 5)

You can use the id to grab and inspect any of these nodes, eg,

julia
out.samples[2733]\n# Output: SampleNode(id: 2733, stats: 1/2, length: 5)

We can also iterate through all samples and extract whatever information we want with PostOrderDFS or PreOrderDFS (exported from AbstractTrees.jl)

julia
for sample in PostOrderDFS(out.samples)\n    # Data is the universal field for samples, we put `conversation` in there\n    # Last item in data is the last message in coversation\n    msg = sample.data[end]\n    if msg isa PT.AIMessage # skip feedback\n        # get only the message content, ie, the guess\n        println("ID: $(sample.id), Answer: $(msg.content)")\n    end\nend\n\n# ID: 20493, Answer: yellow\n# ID: 50086, Answer: yellow\n# ID: 2733, Answer: red\n# ID: 30088, Answer: blue\n# ID: 44816, Answer: blue

Note: airetry! will attempt to fix the model max_retries times. If you set throw=true, it will throw an ErrorException if the condition is not met after max_retries retries.

Let's define a mini program to guess the number and use airetry! to guide the model to the correct answer:

julia
"""\n    llm_guesser()\n\nMini program to guess the number provided by the user (betwee 1-100).\n"""\nfunction llm_guesser(user_number::Int)\n    @assert 1 <= user_number <= 100\n    prompt = """\nI'm thinking a number between 1-100. Guess which one it is. \nYou must respond only with digits and nothing else. \nYour guess:"""\n    ## 2 samples at a time, max 5 fixing rounds\n    out = AIGenerate(prompt; config = RetryConfig(; n_samples = 2, max_retries = 5),\n        api_kwargs = (; n = 2)) |> run!\n    ## Check the proper output format - must parse to Int, use do-syntax\n    ## We can provide feedback via a function!\n    function feedback_f(aicall)\n        "Output: $(last_output(aicall))\nFeedback: You must respond only with digits!!"\n    end\n    airetry!(out, feedback_f) do aicall\n        !isnothing(tryparse(Int, last_output(aicall)))\n    end\n    ## Give a hint on bounds\n    lower_bound = (user_number ÷ 10) * 10\n    upper_bound = lower_bound + 10\n    airetry!(\n        out, "The number is between or equal to $lower_bound to $upper_bound.") do aicall\n        guess = tryparse(Int, last_output(aicall))\n        lower_bound <= guess <= upper_bound\n    end\n    ## You can make at most 3x guess now -- if there is max_retries in `config.max_retries` left\n    max_retries = out.config.retries + 3\n    function feedback_f2(aicall)\n        guess = tryparse(Int, last_output(aicall))\n        "Your guess of $(guess) is wrong, it's $(abs(guess-user_number)) numbers away."\n    end\n    airetry!(out, feedback_f2; max_retries) do aicall\n        tryparse(Int, last_output(aicall)) == user_number\n    end\n\n    ## Evaluate the best guess\n    @info "Results: Guess: $(last_output(out)) vs User: $user_number (Number of calls made: $(out.config.calls))"\n    return out\nend\n\n# Let's play the game\nout = llm_guesser(33)\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Results: Guess: 33 vs User: 33 (Number of calls made: 10)

Yay! We got it 😃

Now, we could explore different samples (eg, print_samples(out.samples)) or see what the model guessed at each step:

julia
print_samples(out.samples)\n## SampleNode(id: 57694, stats: 6/14, score: 0.43, length: 2)\n## ├─ SampleNode(id: 35603, stats: 5/10, score: 1.23, length: 4)\n## │  ├─ SampleNode(id: 55394, stats: 1/4, score: 1.32, length: 6)\n## │  │  ├─ SampleNode(id: 20737, stats: 0/1, score: 1.67, length: 7)\n## │  │  └─ SampleNode(id: 52910, stats: 0/1, score: 1.67, length: 7)\n## │  └─ SampleNode(id: 43094, stats: 3/4, score: 1.82, length: 6)\n## │     ├─ SampleNode(id: 14966, stats: 1/1, score: 2.67, length: 7)\n## │     └─ SampleNode(id: 32991, stats: 1/1, score: 2.67, length: 7)\n## └─ SampleNode(id: 20506, stats: 1/4, score: 1.4, length: 4)\n##    ├─ SampleNode(id: 37581, stats: 0/1, score: 1.67, length: 5)\n##    └─ SampleNode(id: 46632, stats: 0/1, score: 1.67, length: 5)\n\n# Lastly, let's check all the guesses AI made across all samples. \n# Our winning guess was ID 32991 (`out.active_sample_id`)\n\nfor sample in PostOrderDFS(out.samples)\n    [println("ID: $(sample.id), Guess: $(msg.content)")\n     for msg in sample.data if msg isa PT.AIMessage]\nend\n## ID: 20737, Guess: 50\n## ID: 20737, Guess: 35\n## ID: 20737, Guess: 37\n## ID: 52910, Guess: 50\n## ID: 52910, Guess: 35\n## ID: 52910, Guess: 32\n## ID: 14966, Guess: 50\n## ID: 14966, Guess: 35\n## ID: 14966, Guess: 33\n## ID: 32991, Guess: 50\n## ID: 32991, Guess: 35\n## ID: 32991, Guess: 33\n## etc...

Note that if there are multiple "branches", the model will see only its own feedback and that of its ancestors, not that of the other "branches". If you want to provide ALL feedback, set RetryConfig(; n_samples=1) to remove any "branching". The fixing will then be done sequentially in one conversation and the model will see all feedback (less powerful if the model falls into a bad state). Alternatively, you can tweak the feedback function.

See Also

References: airetry is inspired by the Language Agent Tree Search paper and by the DSPy Assertions paper.

source


# PromptingTools.Experimental.AgentTools.backpropagate!Method.

Provides scores for a given node (and all its ancestors) based on the evaluation (wins, visits).

source


# PromptingTools.Experimental.AgentTools.beta_sampleMethod.
julia
beta_sample(α::Real, β::Real)

Approximates a sample from the Beta distribution by generating two independent Gamma distributed samples and using their ratio.

source


# PromptingTools.Experimental.AgentTools.collect_all_feedbackMethod.

Collects all feedback from the node and its ancestors (parents). Returns a string separated by separator.

source


# PromptingTools.Experimental.AgentTools.error_feedbackMethod.
julia
error_feedback(e::Any; max_length::Int = 512)

Set of specialized methods to provide feedback on different types of errors (e).

source


# PromptingTools.Experimental.AgentTools.evaluate_condition!Function.
julia
evaluate_condition!(f_cond::Function, aicall::AICallBlock,\n    feedback::Union{AbstractString, Function} = "";\n    evaluate_all::Bool = true, feedback_expensive::Bool = false)

Evaluates the condition f_cond (must return Bool) on the aicall object. If the condition is not met, it will return the best sample to retry from and provide feedback.

Mutating, as the results are saved in aicall.samples.

If evaluate_all is true, it will evaluate all the "successful" samples in the aicall object. Otherwise, it will only evaluate the active sample.

For f_cond and feedback functions, you can use the last_message and last_output utilities to access the last message and last output in the conversation, respectively.

Arguments

Returns

Example

julia
# Mimic AIGenerate run!\naicall = AIGenerate("Say hi!"; config = RetryConfig(; n_samples = 2))\nsample = expand!(aicall.samples, aicall.conversation; success = true)\naicall.active_sample_id = sample.id\n\n# Return whether it passed and node to take the next action from\ncond, node = AT.evaluate_condition!(x -> occursin("hi", last_output(x)), aicall)\n\n# Checks:\ncond == true\nnode == sample\nnode.wins == 1

With feedback:

julia
# Mimic AIGenerate run with feedback\naicall = AIGenerate(:BlankSystemUser; system = "a", user = "b")\nsample = expand!(aicall.samples, aicall.conversation; success = true)\naicall.active_sample_id = sample.id\n\n# Evaluate\ncond, node = AT.evaluate_condition!(\n    x -> occursin("NOTFOUND", last_output(x)), aicall, "Feedback X")\ncond == false # fail\nsample == node # same node (no other choice)\nnode.wins == 0\nnode.feedback == " Feedback X"

source


# PromptingTools.Experimental.AgentTools.expand!Method.

Expands the tree with a new node from parent using the given data and success.

source


# PromptingTools.Experimental.AgentTools.extract_configMethod.

Extracts config::RetryConfig from kwargs and returns the rest of the kwargs.

source


# PromptingTools.Experimental.AgentTools.find_nodeMethod.

Finds a node with a given id in the tree starting from node.

source


# PromptingTools.Experimental.AgentTools.gamma_sampleMethod.
julia
gamma_sample(α::Real, θ::Real)

Approximates a sample from the Gamma distribution using the Marsaglia and Tsang method.

source


# PromptingTools.Experimental.AgentTools.print_samplesMethod.

Pretty prints the samples tree starting from node. Usually, node is the root of the tree. Example: print_samples(aicall.samples).

source


# PromptingTools.Experimental.AgentTools.remove_used_kwargsMethod.

Removes the kwargs that have already been used in the conversation. Returns NamedTuple.

source


# PromptingTools.Experimental.AgentTools.reset_success!Function.

Sets the success field of all nodes in the tree to success value.

source


# PromptingTools.Experimental.AgentTools.run!Method.
julia
run!(codefixer::AICodeFixer; verbose::Int = 1, max_conversation_length::Int = 32000, run_kwargs...)

Executes the code fixing process encapsulated by the AICodeFixer instance. This method iteratively refines and fixes code by running the AI call in a loop for a specified number of rounds, using feedback from the code evaluation (aicodefixer_feedback) to improve the outcome in each iteration.

Arguments

Returns

Usage

julia
aicall = AICall(aigenerate, schema=mySchema, conversation=myConversation)\ncodefixer = AICodeFixer(aicall, myTemplates; num_rounds=5)\nresult = run!(codefixer, verbose=2)

Notes

source


# PromptingTools.Experimental.AgentTools.run!Method.
julia
run!(aicall::AICallBlock; verbose::Int = 1, catch_errors::Bool = false, return_all::Bool = true, kwargs...)

Executes the AI call wrapped by an AICallBlock instance. This method triggers the actual communication with the AI model and processes the response based on the provided conversation context and parameters.

Note: Currently return_all must always be set to true.

Arguments

Returns

Example

julia
aicall = AICall(aigenerate)\nrun!(aicall)

Alternatively, you can trigger the run! call by using the AICall as a functor and calling it with a string or a UserMessage:

julia
aicall = AICall(aigenerate)\naicall("Say hi!")

Notes

source


# PromptingTools.Experimental.AgentTools.scoreMethod.

Scores a node using the ThompsonSampling method, similar to Bandit algorithms.

source


# PromptingTools.Experimental.AgentTools.scoreMethod.

Scores a node using the UCT (Upper Confidence Bound for Trees) method.

source


# PromptingTools.Experimental.AgentTools.select_bestFunction.
julia
select_best(node::SampleNode, scoring::AbstractScoringMethod = UCT();\n    ordering::Symbol = :PostOrderDFS)

Selects the best node from the tree using the given scoring (UCT or ThompsonSampling). Defaults to UCT. Thompson Sampling is more random with small samples, while UCT stabilizes much quicker thanks to looking at parent nodes as well.

Ordering can be either :PreOrderDFS or :PostOrderDFS. Defaults to :PostOrderDFS, which favors the leaves (end points of the tree).

Example

Compare the different scoring methods:

julia
# Set up mock samples and scores\ndata = PT.AbstractMessage[]\nroot = SampleNode(; data)\nchild1 = expand!(root, data)\nbackpropagate!(child1; wins = 1, visits = 1)\nchild2 = expand!(root, data)\nbackpropagate!(child2; wins = 0, visits = 1)\nchild11 = expand!(child1, data)\nbackpropagate!(child11; wins = 1, visits = 1)\n\n# Select with UCT\nn = select_best(root, UCT())\nSampleNode(id: 29826, stats: 1/1, length: 0)\n\n# Show the tree:\nprint_samples(root; scoring = UCT())\n## SampleNode(id: 13184, stats: 2/3, score: 0.67, length: 0)\n## ├─ SampleNode(id: 26078, stats: 2/2, score: 2.05, length: 0)\n## │  └─ SampleNode(id: 29826, stats: 1/1, score: 2.18, length: 0)\n## └─ SampleNode(id: 39931, stats: 0/1, score: 1.48, length: 0)\n\n# Select with ThompsonSampling - much more random with small samples\nn = select_best(root, ThompsonSampling())\nSampleNode(id: 26078, stats: 2/2, length: 0)\n\n# Show the tree (run it a few times and see how the scores jump around):\nprint_samples(root; scoring = ThompsonSampling())\n## SampleNode(id: 13184, stats: 2/3, score: 0.6, length: 0)\n## ├─ SampleNode(id: 26078, stats: 2/2, score: 0.93, length: 0)\n## │  └─ SampleNode(id: 29826, stats: 1/1, score: 0.22, length: 0)\n## └─ SampleNode(id: 39931, stats: 0/1, score: 0.84, length: 0)

source


# PromptingTools.Experimental.AgentTools.split_multi_samplesMethod.

If the conversation has multiple AIMessage samples, split them into separate conversations with the common past.

source


# PromptingTools.Experimental.AgentTools.truncate_conversationMethod.
julia
truncate_conversation(conversation::AbstractVector{<:PT.AbstractMessage};\n    max_conversation_length::Int = 32000)

Truncates a given conversation to a max_conversation_length characters by removing messages "in the middle". It tries to retain the original system+user message and also the most recent messages.

Practically, if a conversation is too long, it will start by removing the most recent message EXCEPT for the last two (assumed to be the last AIMessage with the code and the UserMessage with the feedback).

Arguments

max_conversation_length is in characters; assuming roughly 2-3 characters per LLM token, 32000 characters should correspond to a 16K-token context window.

source


# PromptingTools.Experimental.AgentTools.unwrap_aicall_argsMethod.

Unwraps the arguments for AICall and returns the schema and conversation (if provided). Expands any provided AITemplate.

source


# PromptingTools.last_messageMethod.

Helpful accessor for AICall blocks. Returns the last message in the conversation.

source


# PromptingTools.last_outputMethod.

Helpful accessor for AICall blocks. Returns the last output in the conversation (eg, the string/data in the last message).

source


', 76) + createStaticVNode('

Reference for AgentTools

# PromptingTools.Experimental.AgentToolsModule.
julia
AgentTools

Provides agentic functionality: lazy calls for building pipelines (eg, AIGenerate) and AICodeFixer.

This module is experimental and may change at any time. It is intended to be moved to a separate package in the future.

source


# PromptingTools.Experimental.AgentTools.AICallType.
julia
AICall(func::F, args...; kwargs...) where {F<:Function}\n\nAIGenerate(args...; kwargs...)\nAIEmbed(args...; kwargs...)\nAIExtract(args...; kwargs...)

A lazy call wrapper for AI functions in the PromptingTools module, such as aigenerate.

The AICall struct is designed to facilitate a deferred execution model (lazy evaluation) for AI functions that interact with a Large Language Model (LLM). It stores the necessary information for an AI call and executes the underlying AI function only when supplied with a UserMessage or when the run! method is applied. This approach allows for more flexible and efficient handling of AI function calls, especially in interactive environments.

See also: run!, AICodeFixer

Fields

Example

Initiate an AICall like any ai* function, eg, AIGenerate:

julia
aicall = AICall(aigenerate)\n\n# With arguments and kwargs like ai* functions\n# from `aigenerate(schema, conversation; model="abc", api_kwargs=(; temperature=0.1))`\n# to\naicall = AICall(aigenerate, schema, conversation; model="abc", api_kwargs=(; temperature=0.1))\n\n# Or with a template\naicall = AIGenerate(:JuliaExpertAsk; ask="xyz", model="abc", api_kwargs=(; temperature=0.1))

Trigger the AICall with run! (it returns the updated AICall struct back):

julia
aicall |> run!

You can also use AICall as a functor to trigger the AI call with a UserMessage or simply the text to send:

julia
aicall(UserMessage("Hello, world!")) # Triggers the lazy call\nresult = run!(aicall) # Explicitly runs the AI call

This can be used to "reply" to a previous message / continue the stored conversation.

Notes

source


# PromptingTools.Experimental.AgentTools.AICodeFixerType.
julia
AICodeFixer(aicall::AICall, templates::Vector{<:PT.UserMessage}; num_rounds::Int = 3, feedback_func::Function = aicodefixer_feedback; kwargs...)\nAICodeFixer(aicall::AICall, template::Union{AITemplate, Symbol} = :CodeFixerRCI; kwargs...)

An AIAgent that iteratively evaluates any received Julia code and provides feedback back to the AI model if num_rounds>0. AICodeFixer manages the lifecycle of a code fixing session, including tracking conversation history, rounds of interaction, and applying user feedback through a specialized feedback function.

It integrates with lazy AI call structures like AIGenerate.

The operation is "lazy", ie, the agent is only executed when needed, eg, when run! is called.

Fields

Note: Any kwargs provided to run!() will be passed to the underlying AICall.

Example

Let's create an AIGenerate call and then pipe it to AICodeFixer to run a few rounds of the code fixing:

julia
# Create an AIGenerate call\nlazy_call = AIGenerate("Write a function to do XYZ...")\n\n# the action starts only when `run!` is called\nresult = lazy_call |> AICodeFixer |> run!\n\n# Access the result of the code fixing session\n# result.call refers to the AIGenerate lazy call above\nconversation = result.call.conversation\nfixed_code = last(conversation) # usually in the last message\n\n# Preview the conversation history\npreview(conversation)

You can change the template used to provide user feedback and the number of rounds via arguments:

julia
# Setup an AIGenerate call\nlazy_call = AIGenerate(aigenerate, "Write code to do XYZ...")\n\n# Custom template and 2 fixing rounds\nresult = AICodeFixer(lazy_call, [PT.UserMessage("Please fix the code.\n\nFeedback: {{feedback}}")]; num_rounds = 2) |> run!\n\n# The result now contains the AI's attempts to fix the code\npreview(result.call.conversation)

Notes

source


# PromptingTools.Experimental.AgentTools.RetryConfigType.
julia
RetryConfig

Configuration for self-fixing the AI calls. It includes the following fields:

Fields

source


# PromptingTools.Experimental.AgentTools.SampleNodeType.
julia
SampleNode{T}

A node in the Monte Carlo Tree Search tree.

It's used to hold the data we're trying to optimize/discover (eg, a conversation), the scores from evaluation (wins, visits) and the results of the evaluations upon failure (feedback).

Fields

source


# PromptingTools.Experimental.AgentTools.ThompsonSamplingType.
julia
ThompsonSampling <: AbstractScoringMethod

Implements scoring and selection for Thompson Sampling method. See https://en.wikipedia.org/wiki/Thompson_sampling for more details.

source


# PromptingTools.Experimental.AgentTools.UCTType.
julia
UCT <: AbstractScoringMethod

Implements scoring and selection for UCT (Upper Confidence Bound for Trees) sampling method. See https://en.wikipedia.org/wiki/Monte_Carlo_tree_search#Exploration_and_exploitation for more details.

source


# PromptingTools.Experimental.AgentTools.AIClassifyMethod.
julia
AIClassify(args...; kwargs...)

Creates a lazy instance of aiclassify. It is an instance of AICall with aiclassify as the function.

Use exactly the same arguments and keyword arguments as aiclassify (see ?aiclassify for details).

source


# PromptingTools.Experimental.AgentTools.AIEmbedMethod.
julia
AIEmbed(args...; kwargs...)

Creates a lazy instance of aiembed. It is an instance of AICall with aiembed as the function.

Use exactly the same arguments and keyword arguments as aiembed (see ?aiembed for details).

source


# PromptingTools.Experimental.AgentTools.AIExtractMethod.
julia
AIExtract(args...; kwargs...)

Creates a lazy instance of aiextract. It is an instance of AICall with aiextract as the function.

Use exactly the same arguments and keyword arguments as aiextract (see ?aiextract for details).

source


# PromptingTools.Experimental.AgentTools.AIGenerateMethod.
julia
AIGenerate(args...; kwargs...)

Creates a lazy instance of aigenerate. It is an instance of AICall with aigenerate as the function.

Use exactly the same arguments and keyword arguments as aigenerate (see ?aigenerate for details).

source


# PromptingTools.Experimental.AgentTools.AIScanMethod.
julia
AIScan(args...; kwargs...)

Creates a lazy instance of aiscan. It is an instance of AICall with aiscan as the function.

Use exactly the same arguments and keyword arguments as aiscan (see ?aiscan for details).

source


# PromptingTools.Experimental.AgentTools.add_feedback!Method.
julia
add_feedback!(\n    conversation::AbstractVector{<:PT.AbstractMessage}, sample::SampleNode; feedback_inplace::Bool = false,\n    feedback_template::Symbol = :FeedbackFromEvaluator)

Adds formatted feedback to the conversation based on the sample node feedback (and its ancestors).

Arguments

Example

julia
sample = SampleNode(; data = nothing, feedback = "Feedback X")\nconversation = [PT.UserMessage("I say hi!"), PT.AIMessage(; content = "I say hi!")]\nconversation = AT.add_feedback!(conversation, sample)\nconversation[end].content == "### Feedback from Evaluator\\nFeedback X\\n"

Inplace feedback:

julia
conversation = [PT.UserMessage("I say hi!"), PT.AIMessage(; content = "I say hi!")]\nconversation = AT.add_feedback!(conversation, sample; feedback_inplace = true)\nconversation[end].content == "I say hi!\\n\\n### Feedback from Evaluator\\nFeedback X\\n"

Sample with ancestors with feedback:

julia
sample_p = SampleNode(; data = nothing, feedback = "\\nFeedback X")\nsample = expand!(sample_p, nothing)\nsample.feedback = "\\nFeedback Y"\nconversation = [PT.UserMessage("I say hi!"), PT.AIMessage(; content = "I say hi!")]\nconversation = AT.add_feedback!(conversation, sample)\n\nconversation[end].content == "### Feedback from Evaluator\\n\\nFeedback X\\n–––––\\n\\nFeedback Y\\n"

source


# PromptingTools.Experimental.AgentTools.aicodefixer_feedbackMethod.
julia
aicodefixer_feedback(cb::AICode; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(conversation::AbstractVector{<:PT.AbstractMessage}; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(msg::PT.AIMessage; max_length::Int = 512) -> NamedTuple(; feedback::String)\naicodefixer_feedback(aicall::AICall; max_length::Int = 512) -> NamedTuple(; feedback::String)

Generate feedback for an AI code fixing session based on the AICode block and/or the conversation history (that will be used to extract and evaluate a code block). The function is designed to be extensible for different types of feedback and code evaluation outcomes.

The highlevel wrapper accepts a conversation and returns new kwargs for the AICall.

Individual feedback functions are dispatched on different subtypes of AbstractCodeOutcome and can be extended/overwritten to provide more detailed feedback.

See also: AIGenerate, AICodeFixer

Arguments

Returns

Example

julia
cb = AICode(msg; skip_unsafe = true, capture_stdout = true)\nnew_kwargs = aicodefixer_feedback(cb)\n\nnew_kwargs = aicodefixer_feedback(msg)\nnew_kwargs = aicodefixer_feedback(conversation)

Notes

This function is part of the AI code fixing system, intended to interact with code in AIMessage and provide feedback on improving it.

The highlevel wrapper accepts a conversation and returns new kwargs for the AICall.

It dispatches for the code feedback based on the subtypes of AbstractCodeOutcome below:

You can override the individual methods to customize the feedback.

source


# PromptingTools.Experimental.AgentTools.airetry!Function.
julia
airetry!(\n    f_cond::Function, aicall::AICallBlock, feedback::Union{AbstractString, Function} = "";\n    verbose::Bool = true, throw::Bool = false, evaluate_all::Bool = true, feedback_expensive::Bool = false,\n    max_retries::Union{Nothing, Int} = nothing, retry_delay::Union{Nothing, Int} = nothing)

Evaluates the condition f_cond on the aicall object. If the condition is not met, it will return the best sample to retry from and provide feedback (string or function) to aicall. That's why it's mutating. It will retry at most max_retries times; with throw=true, an error is thrown if the condition is still not met after max_retries retries.

Note: aicall must be run first via run!(aicall) before calling airetry!.

Function signatures

You can leverage the last_message, last_output, and AICode functions to access the last message, last output and execute code blocks in the conversation, respectively. See examples below.

Good Use Cases

Gotchas

Arguments

Returns

Example

You can use airetry! to catch API errors in run! and auto-retry the call. RetryConfig is how you influence all the subsequent retry behaviours - see ?RetryConfig for more details.

julia
# API failure because of a non-existent model\nout = AIGenerate("say hi!"; config = RetryConfig(; catch_errors = true),\n    model = "NOTEXIST")\nrun!(out) # fails\n\n# we ask to wait 2s between retries and retry 2 times (can be set in `config` in aicall as well)\nairetry!(isvalid, out; retry_delay = 2, max_retries = 2)

If you provide arguments to the aicall, we try to honor them as much as possible in the following calls, eg, setting low verbosity:

julia
out = AIGenerate("say hi!"; config = RetryConfig(; catch_errors = true),\nmodel = "NOTEXIST", verbose=false)\nrun!(out)\n# No info message, you just see `success = false` in the properties of the AICall

Let's show a toy example to demonstrate the runtime checks / guardrails for the model output. We'll play a color guessing game (I'm thinking "yellow"):

julia
# Notice that we ask for two samples (`n_samples=2`) at each attempt (to improve our chances). \n# Both guesses are scored at each time step, and the best one is chosen for the next step.\n# And with OpenAI, we can set `api_kwargs = (;n=2)` to get both samples simultaneously (cheaper and faster)!\nout = AIGenerate(\n    "Guess what color I'm thinking. It could be: blue, red, black, white, yellow. Answer with 1 word only";\n    verbose = false,\n    config = RetryConfig(; n_samples = 2), api_kwargs = (; n = 2))\nrun!(out)\n\n\n## Check that the output is 1 word only, third argument is the feedback that will be provided if the condition fails\n## Notice: functions operate on `aicall` as the only argument. We can use utilities like `last_output` and `last_message` to access the last message and output in the conversation.\nairetry!(x -> length(split(last_output(x), r" |\\.")) == 1, out,\n    "You must answer with 1 word only.")\n\n\n## Let's ensure that the output is in lowercase - simple and short\nairetry!(x -> all(islowercase, last_output(x)), out, "You must answer in lowercase.")\n# [ Info: Condition not met. Retrying...\n\n\n## Let's add final hint - it took us 2 retries\nairetry!(x -> startswith(last_output(x), "y"), out, "It starts with "y"")\n# [ Info: Condition not met. Retrying...\n# [ Info: Condition not met. Retrying...\n\n\n## We end up with the correct answer\nlast_output(out)\n# Output: "yellow"

Let's explore how we got here. We save the various attempts in a "tree" (a SampleNode object). You can access it in out.samples, which is the ROOT of the tree (top level). The currently "active" sample ID is out.active_sample_id; its data is the same as the conversation field in your AICall.

julia
# Root node:\nout.samples\n# Output: SampleNode(id: 46839, stats: 6/12, length: 2)\n\n# Active sample (our correct answer):\nout.active_sample_id \n# Output: 50086\n\n# Let's obtain the active sample node with this ID  - use getindex notation or function find_node\nout.samples[out.active_sample_id]\n# Output: SampleNode(id: 50086, stats: 1/1, length: 7)\n\n# The SampleNode has two key fields: data and feedback. Data is where the conversation is stored:\nactive_sample = out.samples[out.active_sample_id]\nactive_sample.data == out.conversation # Output: true -> This is the winning guess!

We also get a clear view of the tree structure of all samples with print_samples:

julia
julia> print_samples(out.samples)\nSampleNode(id: 46839, stats: 6/12, score: 0.5, length: 2)\n├─ SampleNode(id: 12940, stats: 5/8, score: 1.41, length: 4)\n│  ├─ SampleNode(id: 34315, stats: 3/4, score: 1.77, length: 6)\n│  │  ├─ SampleNode(id: 20493, stats: 1/1, score: 2.67, length: 7)\n│  │  └─ SampleNode(id: 50086, stats: 1/1, score: 2.67, length: 7)\n│  └─ SampleNode(id: 2733, stats: 1/2, score: 1.94, length: 5)\n└─ SampleNode(id: 48343, stats: 1/4, score: 1.36, length: 4)\n   ├─ SampleNode(id: 30088, stats: 0/1, score: 1.67, length: 5)\n   └─ SampleNode(id: 44816, stats: 0/1, score: 1.67, length: 5)

You can use the id to grab and inspect any of these nodes, eg,

julia
out.samples[2733]\n# Output: SampleNode(id: 2733, stats: 1/2, length: 5)

We can also iterate through all samples and extract whatever information we want with PostOrderDFS or PreOrderDFS (exported from AbstractTrees.jl)

julia
for sample in PostOrderDFS(out.samples)\n    # Data is the universal field for samples, we put `conversation` in there\n    # Last item in data is the last message in conversation\n    msg = sample.data[end]\n    if msg isa PT.AIMessage # skip feedback\n        # get only the message content, ie, the guess\n        println("ID: $(sample.id), Answer: $(msg.content)")\n    end\nend\n\n# ID: 20493, Answer: yellow\n# ID: 50086, Answer: yellow\n# ID: 2733, Answer: red\n# ID: 30088, Answer: blue\n# ID: 44816, Answer: blue

Note: airetry! will attempt to fix the model max_retries times. If you set throw=true, it will throw an ErrorException if the condition is not met after max_retries retries.

Let's define a mini program to guess the number and use airetry! to guide the model to the correct answer:

julia
"""\n    llm_guesser()\n\nMini program to guess the number provided by the user (betwee 1-100).\n"""\nfunction llm_guesser(user_number::Int)\n    @assert 1 <= user_number <= 100\n    prompt = """\nI'm thinking a number between 1-100. Guess which one it is. \nYou must respond only with digits and nothing else. \nYour guess:"""\n    ## 2 samples at a time, max 5 fixing rounds\n    out = AIGenerate(prompt; config = RetryConfig(; n_samples = 2, max_retries = 5),\n        api_kwargs = (; n = 2)) |> run!\n    ## Check the proper output format - must parse to Int, use do-syntax\n    ## We can provide feedback via a function!\n    function feedback_f(aicall)\n        "Output: $(last_output(aicall))\nFeedback: You must respond only with digits!!"\n    end\n    airetry!(out, feedback_f) do aicall\n        !isnothing(tryparse(Int, last_output(aicall)))\n    end\n    ## Give a hint on bounds\n    lower_bound = (user_number ÷ 10) * 10\n    upper_bound = lower_bound + 10\n    airetry!(\n        out, "The number is between or equal to $lower_bound to $upper_bound.") do aicall\n        guess = tryparse(Int, last_output(aicall))\n        lower_bound <= guess <= upper_bound\n    end\n    ## You can make at most 3x guess now -- if there is max_retries in `config.max_retries` left\n    max_retries = out.config.retries + 3\n    function feedback_f2(aicall)\n        guess = tryparse(Int, last_output(aicall))\n        "Your guess of $(guess) is wrong, it's $(abs(guess-user_number)) numbers away."\n    end\n    airetry!(out, feedback_f2; max_retries) do aicall\n        tryparse(Int, last_output(aicall)) == user_number\n    end\n\n    ## Evaluate the best guess\n    @info "Results: Guess: $(last_output(out)) vs User: $user_number (Number of calls made: $(out.config.calls))"\n    return out\nend\n\n# Let's play the game\nout = llm_guesser(33)\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Condition not met. Retrying...\n[ Info: Results: Guess: 33 vs User: 33 (Number of calls made: 10)

Yay! We got it 😃

Now, we could explore different samples (eg, print_samples(out.samples)) or see what the model guessed at each step:

julia
print_samples(out.samples)\n## SampleNode(id: 57694, stats: 6/14, score: 0.43, length: 2)\n## ├─ SampleNode(id: 35603, stats: 5/10, score: 1.23, length: 4)\n## │  ├─ SampleNode(id: 55394, stats: 1/4, score: 1.32, length: 6)\n## │  │  ├─ SampleNode(id: 20737, stats: 0/1, score: 1.67, length: 7)\n## │  │  └─ SampleNode(id: 52910, stats: 0/1, score: 1.67, length: 7)\n## │  └─ SampleNode(id: 43094, stats: 3/4, score: 1.82, length: 6)\n## │     ├─ SampleNode(id: 14966, stats: 1/1, score: 2.67, length: 7)\n## │     └─ SampleNode(id: 32991, stats: 1/1, score: 2.67, length: 7)\n## └─ SampleNode(id: 20506, stats: 1/4, score: 1.4, length: 4)\n##    ├─ SampleNode(id: 37581, stats: 0/1, score: 1.67, length: 5)\n##    └─ SampleNode(id: 46632, stats: 0/1, score: 1.67, length: 5)\n\n# Lastly, let's check all the guesses AI made across all samples. \n# Our winning guess was ID 32991 (`out.active_sample_id`)\n\nfor sample in PostOrderDFS(out.samples)\n    [println("ID: $(sample.id), Guess: $(msg.content)")\n     for msg in sample.data if msg isa PT.AIMessage]\nend\n## ID: 20737, Guess: 50\n## ID: 20737, Guess: 35\n## ID: 20737, Guess: 37\n## ID: 52910, Guess: 50\n## ID: 52910, Guess: 35\n## ID: 52910, Guess: 32\n## ID: 14966, Guess: 50\n## ID: 14966, Guess: 35\n## ID: 14966, Guess: 33\n## ID: 32991, Guess: 50\n## ID: 32991, Guess: 35\n## ID: 32991, Guess: 33\n## etc...

Note that if there are multiple "branches", the model will see only its own feedback and that of its ancestors, not the other "branches". If you want to provide ALL feedback, set RetryConfig(; n_samples=1) to remove any "branching". The fixing will then be done sequentially in one conversation and the model will see all feedback (less powerful if the model falls into a bad state). Alternatively, you can tweak the feedback function.

See Also

References: airetry is inspired by the Language Agent Tree Search paper and by the DSPy Assertions paper.

source


# PromptingTools.Experimental.AgentTools.backpropagate!Method.

Provides scores for a given node (and all its ancestors) based on the evaluation (wins, visits).
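
A minimal hedged sketch (mirroring the select_best example later in this reference; wins and visits are the fields behind the printed "stats"):

julia
# Hedged sketch: create a tiny tree and propagate one evaluation up to the root\ndata = PT.AbstractMessage[]\nroot = SampleNode(; data)\nchild = expand!(root, data)\nbackpropagate!(child; wins = 1, visits = 1)\nprint_samples(root) # both child and root now show "stats: 1/1"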

source


# PromptingTools.Experimental.AgentTools.beta_sampleMethod.
julia
beta_sample(α::Real, β::Real)

Approximates a sample from the Beta distribution by generating two independent Gamma distributed samples and using their ratio.
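
A hedged usage sketch; the ratio construction below is the standard identity X/(X+Y) ~ Beta(α, β) for independent X ~ Gamma(α, 1) and Y ~ Gamma(β, 1):

julia
# Direct call\nb = beta_sample(2.0, 3.0) # one draw from approximately Beta(2, 3), a value in (0, 1)\n\n# Equivalent construction from two Gamma draws (the identity the method relies on)\nx = gamma_sample(2.0, 1.0)\ny = gamma_sample(3.0, 1.0)\nb2 = x / (x + y)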

source


# PromptingTools.Experimental.AgentTools.collect_all_feedbackMethod.

Collects all feedback from the node and its ancestors (parents). Returns a string separated by separator.

source


# PromptingTools.Experimental.AgentTools.error_feedbackMethod.
julia
error_feedback(e::Any; max_length::Int = 512)

Set of specialized methods to provide feedback on different types of errors (e).

source


# PromptingTools.Experimental.AgentTools.evaluate_condition!Function.
julia
evaluate_condition!(f_cond::Function, aicall::AICallBlock,\n    feedback::Union{AbstractString, Function} = "";\n    evaluate_all::Bool = true, feedback_expensive::Bool = false)

Evaluates the condition f_cond (must return Bool) on the aicall object. If the condition is not met, it will return the best sample to retry from and provide feedback.

Mutating, as the results are saved in aicall.samples.

If evaluate_all is true, it will evaluate all the "successful" samples in the aicall object. Otherwise, it will only evaluate the active sample.

For f_cond and feedback functions, you can use the last_message and last_output utilities to access the last message and last output in the conversation, respectively.

Arguments

Returns

Example

julia
# Mimic AIGenerate run!\naicall = AIGenerate("Say hi!"; config = RetryConfig(; n_samples = 2))\nsample = expand!(aicall.samples, aicall.conversation; success = true)\naicall.active_sample_id = sample.id\n\n# Return whether it passed and node to take the next action from\ncond, node = AT.evaluate_condition!(x -> occursin("hi", last_output(x)), aicall)\n\n# Checks:\ncond == true\nnode == sample\nnode.wins == 1

With feedback:

julia
# Mimic AIGenerate run with feedback\naicall = AIGenerate(\n    :BlankSystemUser; system = "a", user = "b")\nsample = expand!(aicall.samples, aicall.conversation; success = true)\naicall.active_sample_id = sample.id\n\n# Evaluate\ncond, node = AT.evaluate_condition!(\n    x -> occursin("NOTFOUND", last_output(x)), aicall, "Feedback X")\ncond == false # fail\nsample == node # same node (no other choice)\nnode.wins == 0\nnode.feedback == " Feedback X"

source


# PromptingTools.Experimental.AgentTools.expand!Method.

Expands the tree with a new node from parent using the given data and success.

source


# PromptingTools.Experimental.AgentTools.extract_configMethod.

Extracts config::RetryConfig from kwargs and returns the rest of the kwargs.

source


# PromptingTools.Experimental.AgentTools.find_nodeMethod.

Finds a node with a given id in the tree starting from node.

source


# PromptingTools.Experimental.AgentTools.gamma_sampleMethod.
julia
gamma_sample(α::Real, θ::Real)

Approximates a sample from the Gamma distribution using the Marsaglia and Tsang method.
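
A hedged usage sketch; the empirical mean of many draws should be close to α * θ:

julia
# Hedged sketch: sanity-check the sampler against the theoretical mean\nsamples = [gamma_sample(2.0, 3.0) for _ in 1:10_000]\nsum(samples) / length(samples) # ≈ 6.0 (= α * θ)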

source


# PromptingTools.Experimental.AgentTools.print_samplesMethod.

Pretty prints the samples tree starting from node. Usually, node is the root of the tree. Example: print_samples(aicall.samples).

source


# PromptingTools.Experimental.AgentTools.remove_used_kwargsMethod.

Removes the kwargs that have already been used in the conversation. Returns NamedTuple.

source


# PromptingTools.Experimental.AgentTools.reset_success!Function.

Sets the success field of all nodes in the tree to success value.

source


# PromptingTools.Experimental.AgentTools.run!Method.
julia
run!(codefixer::AICodeFixer; verbose::Int = 1, max_conversation_length::Int = 32000, run_kwargs...)

Executes the code fixing process encapsulated by the AICodeFixer instance. This method iteratively refines and fixes code by running the AI call in a loop for a specified number of rounds, using feedback from the code evaluation (aicodefixer_feedback) to improve the outcome in each iteration.

Arguments

Returns

Usage

julia
aicall = AICall(aigenerate, schema=mySchema, conversation=myConversation)\ncodefixer = AICodeFixer(aicall, myTemplates; num_rounds=5)\nresult = run!(codefixer, verbose=2)

Notes

source


# PromptingTools.Experimental.AgentTools.run!Method.
julia
run!(aicall::AICallBlock; verbose::Int = 1, catch_errors::Bool = false, return_all::Bool = true, kwargs...)

Executes the AI call wrapped by an AICallBlock instance. This method triggers the actual communication with the AI model and processes the response based on the provided conversation context and parameters.

Note: Currently return_all must always be set to true.

Arguments

Returns

Example

julia
aicall = AICall(aigenerate)\nrun!(aicall)

Alternatively, you can trigger the run! call by using the AICall as a functor and calling it with a string or a UserMessage:

julia
aicall = AICall(aigenerate)\naicall("Say hi!")

Notes

source


# PromptingTools.Experimental.AgentTools.scoreMethod.

Scores a node using the ThompsonSampling method, similar to Bandit algorithms.
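
A hedged sketch of the textbook Thompson-style score for a node with wins out of visits (the Beta(1, 1) prior below is an assumption, not necessarily the package's exact weighting):

julia
# Hedged sketch: draw a stochastic score; more uncertainty (fewer visits) => more exploration\nthompson_score(wins, visits) = beta_sample(wins + 1.0, visits - wins + 1.0)\nthompson_score(2, 3) # a random draw, centered roughly around 0.6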

source


# PromptingTools.Experimental.AgentTools.scoreMethod.

Scores a node using the UCT (Upper Confidence Bound for Trees) method.
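
A hedged sketch of the textbook UCT formula (the exploration constant c is an assumption; the package may weight the terms differently):

julia
# Hedged sketch: exploitation term plus an exploration bonus that shrinks with more visits\nuct(wins, visits, parent_visits; c = 2.0) = wins / visits + c * sqrt(log(parent_visits) / visits)\nuct(2, 3, 10) # 2/3 exploitation + an exploration bonus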

source


# PromptingTools.Experimental.AgentTools.select_bestFunction.
julia
select_best(node::SampleNode, scoring::AbstractScoringMethod = UCT();\n    ordering::Symbol = :PostOrderDFS)

Selects the best node from the tree using the given scoring (UCT or ThompsonSampling). Defaults to UCT. Thompson Sampling is more random with small samples, while UCT stabilizes much quicker thanks to looking at parent nodes as well.

Ordering can be either :PreOrderDFS or :PostOrderDFS. Defaults to :PostOrderDFS, which favors the leaves (end points of the tree).

Example

Compare the different scoring methods:

julia
# Set up mock samples and scores\ndata = PT.AbstractMessage[]\nroot = SampleNode(; data)\nchild1 = expand!(root, data)\nbackpropagate!(child1; wins = 1, visits = 1)\nchild2 = expand!(root, data)\nbackpropagate!(child2; wins = 0, visits = 1)\nchild11 = expand!(child1, data)\nbackpropagate!(child11; wins = 1, visits = 1)\n\n# Select with UCT\nn = select_best(root, UCT())\nSampleNode(id: 29826, stats: 1/1, length: 0)\n\n# Show the tree:\nprint_samples(root; scoring = UCT())\n## SampleNode(id: 13184, stats: 2/3, score: 0.67, length: 0)\n## ├─ SampleNode(id: 26078, stats: 2/2, score: 2.05, length: 0)\n## │  └─ SampleNode(id: 29826, stats: 1/1, score: 2.18, length: 0)\n## └─ SampleNode(id: 39931, stats: 0/1, score: 1.48, length: 0)\n\n# Select with ThompsonSampling - much more random with small samples\nn = select_best(root, ThompsonSampling())\nSampleNode(id: 26078, stats: 2/2, length: 0)\n\n# Show the tree (run it a few times and see how the scores jump around):\nprint_samples(root; scoring = ThompsonSampling())\n## SampleNode(id: 13184, stats: 2/3, score: 0.6, length: 0)\n## ├─ SampleNode(id: 26078, stats: 2/2, score: 0.93, length: 0)\n## │  └─ SampleNode(id: 29826, stats: 1/1, score: 0.22, length: 0)\n## └─ SampleNode(id: 39931, stats: 0/1, score: 0.84, length: 0)

source


# PromptingTools.Experimental.AgentTools.split_multi_samplesMethod.

If the conversation has multiple AIMessage samples, split them into separate conversations with the common past.

source


# PromptingTools.Experimental.AgentTools.truncate_conversationMethod.
julia
truncate_conversation(conversation::AbstractVector{<:PT.AbstractMessage};\n    max_conversation_length::Int = 32000)

Truncates a given conversation to at most max_conversation_length characters by removing messages "in the middle". It tries to retain the original system+user message and also the most recent messages.

Practically, if a conversation is too long, it will start by removing the most recent messages EXCEPT for the last two (assumed to be the last AIMessage with the code and the UserMessage with the feedback).

Arguments

max_conversation_length is measured in characters; assuming circa 2-3 characters per LLM token, 32000 characters should correspond to approximately a 16K-token context window.
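
A hedged usage sketch (the message contents are made up for illustration):

julia
conv = [PT.SystemMessage("You are a helpful coding assistant."),\n    PT.UserMessage("Write a function that sums a vector."),\n    PT.AIMessage(; content = "function mysum(x) ... end"),\n    PT.UserMessage("Feedback: please add a docstring.")]\nshort = truncate_conversation(conv; max_conversation_length = 32000)\n# The opening system+user pair and the last two messages are preferentially retained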

source


# PromptingTools.Experimental.AgentTools.unwrap_aicall_argsMethod.

Unwraps the arguments for AICall and returns the schema and conversation (if provided). Expands any provided AITemplate.

source


# PromptingTools.last_messageMethod.

Helpful accessor for AICall blocks. Returns the last message in the conversation.

source


# PromptingTools.last_outputMethod.

Helpful accessor for AICall blocks. Returns the last output in the conversation (eg, the string/data in the last message).
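
A hedged usage sketch:

julia
out = AIGenerate("Say hi!") |> run!\nmsg = last_message(out) # the last message in out.conversation (an AIMessage)\nlast_output(out) # its content as a string, eg, "Hi there! How can I help you?"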

source


', 76) ])); } const reference_agenttools = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render]]); diff --git a/previews/PR218/assets/reference_apitools.md.DVDc6Kr1.js b/previews/PR218/assets/reference_apitools.md.BVdQH4AZ.js similarity index 97% rename from previews/PR218/assets/reference_apitools.md.DVDc6Kr1.js rename to previews/PR218/assets/reference_apitools.md.BVdQH4AZ.js index 1b3491466..1fdb29c03 100644 --- a/previews/PR218/assets/reference_apitools.md.DVDc6Kr1.js +++ b/previews/PR218/assets/reference_apitools.md.BVdQH4AZ.js @@ -3,7 +3,7 @@ const __pageData = JSON.parse('{"title":"Reference for APITools","description":" const _sfc_main = { name: "reference_apitools.md" }; function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { return openBlock(), createElementBlock("div", null, _cache[0] || (_cache[0] = [ - createStaticVNode('

Reference for APITools

# PromptingTools.Experimental.APITools.create_websearchMethod.
julia
create_websearch(query::AbstractString;\n    api_key::AbstractString,\n    search_depth::AbstractString = "basic")

Arguments

Example

julia
r = create_websearch("Who is King Charles?")

Even better, you can get not just the results but also the answer:

julia
r = create_websearch("Who is King Charles?"; include_answer = true)

See Rest API documentation for more information.

source


# PromptingTools.Experimental.APITools.tavily_apiMethod.
julia
tavily_api(;\n    api_key::AbstractString,\n    endpoint::String = "search",\n    url::AbstractString = "https://api.tavily.com",\n    http_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Sends API requests to Tavily and returns the response.

source


', 6) + createStaticVNode('

Reference for APITools

# PromptingTools.Experimental.APITools.create_websearchMethod.
julia
create_websearch(query::AbstractString;\n    api_key::AbstractString,\n    search_depth::AbstractString = "basic")

Arguments

Example

julia
r = create_websearch("Who is King Charles?")

Even better, you can get not just the results but also the answer:

julia
r = create_websearch("Who is King Charles?"; include_answer = true)

See Rest API documentation for more information.
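
You can also ask for a deeper crawl; a hedged sketch ("advanced" is Tavily's deeper search mode, per their API docs):

julia
r = create_websearch("Who is King Charles?"; search_depth = "advanced")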

source


# PromptingTools.Experimental.APITools.tavily_apiMethod.
julia
tavily_api(;\n    api_key::AbstractString,\n    endpoint::String = "search",\n    url::AbstractString = "https://api.tavily.com",\n    http_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Sends API requests to Tavily and returns the response.

source


', 6) ])); } const reference_apitools = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render]]); diff --git a/previews/PR218/assets/reference_apitools.md.DVDc6Kr1.lean.js b/previews/PR218/assets/reference_apitools.md.BVdQH4AZ.lean.js similarity index 97% rename from previews/PR218/assets/reference_apitools.md.DVDc6Kr1.lean.js rename to previews/PR218/assets/reference_apitools.md.BVdQH4AZ.lean.js index 1b3491466..1fdb29c03 100644 --- a/previews/PR218/assets/reference_apitools.md.DVDc6Kr1.lean.js +++ b/previews/PR218/assets/reference_apitools.md.BVdQH4AZ.lean.js @@ -3,7 +3,7 @@ const __pageData = JSON.parse('{"title":"Reference for APITools","description":" const _sfc_main = { name: "reference_apitools.md" }; function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { return openBlock(), createElementBlock("div", null, _cache[0] || (_cache[0] = [ - createStaticVNode('

Reference for APITools

# PromptingTools.Experimental.APITools.create_websearchMethod.
julia
create_websearch(query::AbstractString;\n    api_key::AbstractString,\n    search_depth::AbstractString = "basic")

Arguments

Example

julia
r = create_websearch("Who is King Charles?")

Even better, you can get not just the results but also the answer:

julia
r = create_websearch("Who is King Charles?"; include_answer = true)

See Rest API documentation for more information.

source


# PromptingTools.Experimental.APITools.tavily_apiMethod.
julia
tavily_api(;\n    api_key::AbstractString,\n    endpoint::String = "search",\n    url::AbstractString = "https://api.tavily.com",\n    http_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Sends API requests to Tavily and returns the response.

source


', 6) + createStaticVNode('

Reference for APITools

# PromptingTools.Experimental.APITools.create_websearchMethod.
julia
create_websearch(query::AbstractString;\n    api_key::AbstractString,\n    search_depth::AbstractString = "basic")

Arguments

Example

julia
r = create_websearch("Who is King Charles?")

Even better, you can get not just the results but also the answer:

julia
r = create_websearch("Who is King Charles?"; include_answer = true)

See Rest API documentation for more information.

source


# PromptingTools.Experimental.APITools.tavily_apiMethod.
julia
tavily_api(;\n    api_key::AbstractString,\n    endpoint::String = "search",\n    url::AbstractString = "https://api.tavily.com",\n    http_kwargs::NamedTuple = NamedTuple(),\n    kwargs...)

Sends API requests to Tavily and returns the response.

source


', 6) ])); } const reference_apitools = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render]]); diff --git a/previews/PR218/assets/reference_experimental.md.BNmmZjbx.js b/previews/PR218/assets/reference_experimental.md.DW1f4gT-.js similarity index 95% rename from previews/PR218/assets/reference_experimental.md.BNmmZjbx.js rename to previews/PR218/assets/reference_experimental.md.DW1f4gT-.js index 3a08650a9..288b4b8ef 100644 --- a/previews/PR218/assets/reference_experimental.md.BNmmZjbx.js +++ b/previews/PR218/assets/reference_experimental.md.DW1f4gT-.js @@ -3,7 +3,7 @@ const __pageData = JSON.parse('{"title":"Reference for Experimental Module","des const _sfc_main = { name: "reference_experimental.md" }; function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { return openBlock(), createElementBlock("div", null, _cache[0] || (_cache[0] = [ - createStaticVNode('

Reference for Experimental Module

Note: This module is experimental and may change in future releases. The intention is for the functionality to be moved to separate packages over time.

# PromptingTools.ExperimentalModule.
julia
Experimental

This module is for experimental code that is not yet ready for production. It is not included in the main module, so it must be explicitly imported.

Contains:

source


', 5) + createStaticVNode('

Reference for Experimental Module

Note: This module is experimental and may change in future releases. The intention is for the functionality to be moved to separate packages over time.

# PromptingTools.ExperimentalModule.
julia
Experimental

This module is for experimental code that is not yet ready for production. It is not included in the main module, so it must be explicitly imported.

Contains:

source


', 5) ])); } const reference_experimental = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render]]); diff --git a/previews/PR218/assets/reference_experimental.md.BNmmZjbx.lean.js b/previews/PR218/assets/reference_experimental.md.DW1f4gT-.lean.js similarity index 95% rename from previews/PR218/assets/reference_experimental.md.BNmmZjbx.lean.js rename to previews/PR218/assets/reference_experimental.md.DW1f4gT-.lean.js index 3a08650a9..288b4b8ef 100644 --- a/previews/PR218/assets/reference_experimental.md.BNmmZjbx.lean.js +++ b/previews/PR218/assets/reference_experimental.md.DW1f4gT-.lean.js @@ -3,7 +3,7 @@ const __pageData = JSON.parse('{"title":"Reference for Experimental Module","des const _sfc_main = { name: "reference_experimental.md" }; function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { return openBlock(), createElementBlock("div", null, _cache[0] || (_cache[0] = [ - createStaticVNode('

Reference for Experimental Module

Note: This module is experimental and may change in future releases. The intention is for the functionality to be moved to separate packages over time.

# PromptingTools.ExperimentalModule.
julia
Experimental

This module is for experimental code that is not yet ready for production. It is not included in the main module, so it must be explicitly imported.

Contains:

source


', 5) + createStaticVNode('

Reference for Experimental Module

Note: This module is experimental and may change in future releases. The intention is for the functionality to be moved to separate packages over time.

# PromptingTools.ExperimentalModule.
julia
Experimental

This module is for experimental code that is not yet ready for production. It is not included in the main module, so it must be explicitly imported.

Contains:

source


', 5) ])); } const reference_experimental = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render]]); diff --git a/previews/PR218/assets/reference_ragtools.md.gYCqDEbn.lean.js b/previews/PR218/assets/reference_ragtools.md.Bby7eP61.js similarity index 93% rename from previews/PR218/assets/reference_ragtools.md.gYCqDEbn.lean.js rename to previews/PR218/assets/reference_ragtools.md.Bby7eP61.js index 0b8c465d3..2545f9b0d 100644 --- a/previews/PR218/assets/reference_ragtools.md.gYCqDEbn.lean.js +++ b/previews/PR218/assets/reference_ragtools.md.Bby7eP61.js @@ -3,7 +3,7 @@ const __pageData = JSON.parse('{"title":"Reference for RAGTools","description":" const _sfc_main = { name: "reference_ragtools.md" }; function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) { return openBlock(), createElementBlock("div", null, _cache[0] || (_cache[0] = [ - createStaticVNode('

Reference for RAGTools

# PromptingTools.Experimental.RAGToolsModule.
julia
RAGTools

Provides Retrieval-Augmented Generation (RAG) functionality.

Requires: LinearAlgebra, SparseArrays, Unicode, PromptingTools for proper functionality.

This module is experimental and may change at any time. It is intended to be moved to a separate package in the future.

source


# PromptingTools.Experimental.RAGTools.AbstractCandidateChunksType.
julia
AbstractCandidateChunks

Abstract type for storing candidate chunks, ie, references to items in a AbstractChunkIndex.

Return type from find_closest and find_tags functions.

Required Fields

source


# PromptingTools.Experimental.RAGTools.AbstractChunkIndexType.
julia
AbstractChunkIndex <: AbstractDocumentIndex

Main abstract type for storing document chunks and their embeddings. It also stores tags and sources for each chunk.

Required Fields

source


# PromptingTools.Experimental.RAGTools.AbstractGeneratorType.
julia
AbstractGenerator <: AbstractGenerationMethod

Abstract type for generating an answer with generate! (use to change the process / return type of generate).

Required Fields

source


# PromptingTools.Experimental.RAGTools.AbstractIndexBuilderType.
julia
AbstractIndexBuilder

Abstract type for building an index with build_index (use to change the process / return type of build_index).

Required Fields

source


# PromptingTools.Experimental.RAGTools.AbstractMultiIndexType.
julia
AbstractMultiIndex <: AbstractDocumentIndex

Experimental abstract type for storing multiple document indexes. Not yet implemented.

source


# PromptingTools.Experimental.RAGTools.AbstractRetrieverType.
julia
AbstractRetriever <: AbstractRetrievalMethod

Abstract type for retrieving chunks from an index with retrieve (use to change the process / return type of retrieve).

Required Fields

source


# PromptingTools.Experimental.RAGTools.AdvancedGeneratorType.
julia
AdvancedGenerator <: AbstractGenerator

Default implementation for generate!. It simply enumerates context snippets and runs aigenerate (no refinement).

It uses ContextEnumerator, SimpleAnswerer, SimpleRefiner, and NoPostprocessor as default contexter, answerer, refiner, and postprocessor.

source


# PromptingTools.Experimental.RAGTools.AdvancedRetrieverType.
julia
AdvancedRetriever <: AbstractRetriever

Dispatch for retrieve with advanced retrieval methods to improve result quality. Compared to SimpleRetriever, it adds rephrasing the query and reranking the results.

Fields

source


# PromptingTools.Experimental.RAGTools.AllTagFilterType.
julia
AllTagFilter <: AbstractTagFilter

Finds the chunks that have ALL OF the specified tag(s). A method for find_tags.

source


# PromptingTools.Experimental.RAGTools.AnnotatedNodeType.
julia
AnnotatedNode{T}  <: AbstractAnnotatedNode

A node to add annotations to the generated answer in airag

Annotations can be: sources, scores, whether it's supported by the context or not, etc.

Fields

source


# PromptingTools.Experimental.RAGTools.AnyTagFilterType.
julia
AnyTagFilter <: AbstractTagFilter

Finds the chunks that have ANY OF the specified tag(s). A method for find_tags.

source


# PromptingTools.Experimental.RAGTools.BM25SimilarityType.
julia
BM25Similarity <: AbstractSimilarityFinder

Finds the closest chunks to a query by measuring the BM25 similarity between the query tokens and the chunks' keywords (term frequencies in the DocumentTermMatrix). A method for find_closest.

Reference: Wikipedia: BM25. Implementation follows: The Next Generation of Lucene Relevance.

source


# PromptingTools.Experimental.RAGTools.BatchEmbedderType.
julia
BatchEmbedder <: AbstractEmbedder

Default embedder for get_embeddings functions. It passes individual documents to be embedded in chunks to aiembed.

source


# PromptingTools.Experimental.RAGTools.BinaryBatchEmbedderType.
julia
BinaryBatchEmbedder <: AbstractEmbedder

Same as BatchEmbedder but reduces the embeddings matrix to a binary form (eg, BitMatrix). Defines a method for get_embeddings.

Reference: HuggingFace: Embedding Quantization.

source


# PromptingTools.Experimental.RAGTools.BinaryCosineSimilarityType.
julia
BinaryCosineSimilarity <: AbstractSimilarityFinder

Finds the closest chunks to a query embedding by measuring the Hamming distance AND cosine similarity between the query and the chunks' embeddings in binary form. A method for find_closest.

It follows the two-pass approach:

Reference: HuggingFace: Embedding Quantization.

source


# PromptingTools.Experimental.RAGTools.BitPackedBatchEmbedderType.
julia
BitPackedBatchEmbedder <: AbstractEmbedder

Same as BatchEmbedder but reduces the embeddings matrix to a binary form packed in UInt64 (eg, BitMatrix.chunks). Defines a method for get_embeddings.

See also utilities pack_bits and unpack_bits to move between packed/non-packed binary forms.

Reference: HuggingFace: Embedding Quantization.

source


# PromptingTools.Experimental.RAGTools.BitPackedCosineSimilarityType.
julia
BitPackedCosineSimilarity <: AbstractSimilarityFinder

Finds the closest chunks to a query embedding by measuring the Hamming distance AND cosine similarity between the query and the chunks' embeddings in binary form. A method for find_closest.

The difference to BinaryCosineSimilarity is that the binary values are packed into UInt64, which is more efficient.

Reference: HuggingFace: Embedding Quantization. Implementation of hamming_distance is based on TinyRAG.

source


# PromptingTools.Experimental.RAGTools.CandidateChunksType.
julia
CandidateChunks

A struct for storing references to chunks in a given index (identified by index_id): positions of the selected chunks and scores holding the strength of similarity (1.0 is the highest, most similar). It's the result of the retrieval stage of RAG.
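
A hedged construction sketch (mirroring the build_context example later in this reference; index is assumed to exist):

julia
# Positions 2 and 4 in the index with their similarity scores\ncandidates = CandidateChunks(index.id, [2, 4], [0.1, 0.2])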

Fields

source


# PromptingTools.Experimental.RAGTools.ChunkEmbeddingsIndexType.
julia
ChunkEmbeddingsIndex

Main struct for storing document chunks and their embeddings. It also stores tags and sources for each chunk.

Previously, this struct was called ChunkIndex.

Fields

source


# PromptingTools.Experimental.RAGTools.ChunkKeywordsIndexType.
julia
ChunkKeywordsIndex

Struct for storing chunks of text and associated keywords for BM25 similarity search.

Fields

Example

We can easily create a keywords-based index from a standard embeddings-based index.

julia
\n# Let's assume we have a standard embeddings-based index\nindex = build_index(SimpleIndexer(), texts; chunker_kwargs = (; max_length=10))\n\n# Creating an additional index for keyword-based search (BM25), is as simple as\nindex_keywords = ChunkKeywordsIndex(index)\n\n# We can immediately create a MultiIndex (a hybrid index holding both indices)\nmulti_index = MultiIndex([index, index_keywords])

You can also build the index via build_index

julia
# given some sentences and sources\nindex_keywords = build_index(KeywordsIndexer(), sentences; chunker_kwargs=(; sources))\n\n# Retrieve closest chunks with\nretriever = SimpleBM25Retriever()\nresult = retrieve(retriever, index_keywords, "What are the best practices for parallel computing in Julia?")\nresult.context

If you want to use airag, don't forget to specify the config to make sure keywords are processed (ie, tokenized) and that BM25 is used for searching candidates

julia
cfg = RAGConfig(; retriever = SimpleBM25Retriever());\nairag(cfg, index_keywords;\n    question = "What are the best practices for parallel computing in Julia?")

source


# PromptingTools.Experimental.RAGTools.ChunkKeywordsIndexMethod.
julia
ChunkKeywordsIndex(\n    [processor::AbstractProcessor=KeywordsProcessor(),] index::ChunkEmbeddingsIndex; verbose::Int = 1,\n    index_id = gensym("ChunkKeywordsIndex"), processor_kwargs...)

Convenience method to quickly create a ChunkKeywordsIndex from an existing ChunkEmbeddingsIndex.

Example

julia
\n# Let's assume we have a standard embeddings-based index\nindex = build_index(SimpleIndexer(), texts; chunker_kwargs = (; max_length=10))\n\n# Creating an additional index for keyword-based search (BM25), is as simple as\nindex_keywords = ChunkKeywordsIndex(index)\n\n# We can immediately create a MultiIndex (a hybrid index holding both indices)\nmulti_index = MultiIndex([index, index_keywords])

source


# PromptingTools.Experimental.RAGTools.CohereRerankerType.
julia
CohereReranker <: AbstractReranker

Rerank strategy using the Cohere Rerank API. Requires an API key. A method for rerank.

source


# PromptingTools.Experimental.RAGTools.ContextEnumeratorType.
julia
ContextEnumerator <: AbstractContextBuilder

Default method for the build_context! method. It simply enumerates the context snippets around each position in candidates. When possible, it will add surrounding chunks (from the same source).

source


# PromptingTools.Experimental.RAGTools.CosineSimilarityType.
julia
CosineSimilarity <: AbstractSimilarityFinder

Finds the closest chunks to a query embedding by measuring the cosine similarity between the query and the chunks' embeddings. A method for find_closest (see the docstring for more details and usage example).

source


# PromptingTools.Experimental.RAGTools.DocumentTermMatrixType.
julia
DocumentTermMatrix{T<:AbstractString}

A sparse matrix of term frequencies and document lengths to allow calculation of BM25 similarity scores.

source


# PromptingTools.Experimental.RAGTools.FileChunkerType.
julia
FileChunker <: AbstractChunker

Chunker when you provide file paths to get_chunks functions.

Ie, the inputs will be validated first (eg, file exists, etc) and then read into memory.

Set as default chunker in get_chunks functions.

source


# PromptingTools.Experimental.RAGTools.FlashRankerType.
julia
FlashRanker <: AbstractReranker

Rerank strategy using the package FlashRank.jl and local models. A method for rerank.

You must first import the FlashRank.jl package. To automatically download any required models, set your ENV["DATADEPS_ALWAYS_ACCEPT"] = true (see DataDeps for more details).

Example

julia
using FlashRank\n\n# Wrap the model to be a valid Ranker recognized by RAGTools\n# It will be provided to the airag/rerank function to avoid instantiating it on every call\nreranker = FlashRank.RankerModel(:mini) |> FlashRanker\n# You can choose :tiny or :mini\n\n## Apply to the pipeline configuration, eg, \ncfg = RAGConfig(; retriever = AdvancedRetriever(; reranker))\n\n# Ask a question (assumes you have some `index`)\nquestion = "What are the best practices for parallel computing in Julia?"\nresult = airag(cfg, index; question, return_all = true)

source


# PromptingTools.Experimental.RAGTools.HTMLStylerType.
julia
HTMLStyler

Defines styling via classes (attribute class) and styles (attribute style) for HTML formatting of AbstractAnnotatedNode

source


# PromptingTools.Experimental.RAGTools.HyDERephraserType.
julia
HyDERephraser <: AbstractRephraser

Rephraser implemented using the provided AI Template (eg, ...) and standard chat model. A method for rephrase.

It uses a prompt-based rephrasing method called HyDE (Hypothetical Document Embedding), where instead of looking for an embedding of the question, we look for the documents most similar to a synthetic passage that would be a good answer to our question.

Reference: Arxiv paper.

source


# PromptingTools.Experimental.RAGTools.JudgeAllScoresType.

final_rating is the average of all scoring criteria. Explain the final_rating in rationale

source


# PromptingTools.Experimental.RAGTools.JudgeRatingType.

Provide the final_rating between 1-5. Provide the rationale for it.

source


# PromptingTools.Experimental.RAGTools.KeywordsIndexerType.
julia
KeywordsIndexer <: AbstractIndexBuilder

Keyword-based index (BM25) to be returned by build_index.

It uses TextChunker, KeywordsProcessor, and NoTagger as default chunker, processor, and tagger.

source


# PromptingTools.Experimental.RAGTools.KeywordsProcessorType.
julia
KeywordsProcessor <: AbstractProcessor

Default keywords processor for get_keywords functions. It normalizes the documents, tokenizes them and builds a DocumentTermMatrix.

source


# PromptingTools.Experimental.RAGTools.MultiCandidateChunksType.
julia
MultiCandidateChunks

A struct for storing references to multiple sets of chunks across different indices. Each set of chunks is identified by an index_id in index_ids, with corresponding positions in the index and scores indicating the strength of similarity.

This struct is useful for scenarios where candidates are drawn from multiple indices, and there is a need to keep track of which candidates came from which index.

Fields

source


# PromptingTools.Experimental.RAGTools.MultiFinderType.
julia
MultiFinder <: AbstractSimilarityFinder

Composite finder for MultiIndex where we want to set multiple finders for each index. A method for find_closest. Positions correspond to indexes(::MultiIndex).

source


# PromptingTools.Experimental.RAGTools.MultiIndexType.
julia
MultiIndex

Composite index that stores multiple ChunkIndex objects and their embeddings.

Fields

Use the accessor indexes to access the individual indexes.

Examples

We can create a MultiIndex from a vector of AbstractChunkIndex objects.

julia
index = build_index(SimpleIndexer(), texts; chunker_kwargs = (; sources))\nindex_keywords = ChunkKeywordsIndex(index) # same chunks as above but adds BM25 instead of embeddings\n\nmulti_index = MultiIndex([index, index_keywords])

To use airag with different types of indices, we need to specify how to find the closest items for each index

julia
# Cosine similarity for embeddings and BM25 for keywords, same order as indexes in MultiIndex\nfinder = RT.MultiFinder([RT.CosineSimilarity(), RT.BM25Similarity()])\n\n# Notice that we add `processor` to make sure keywords are processed (ie, tokenized) as well\ncfg = RAGConfig(; retriever = SimpleRetriever(; processor = RT.KeywordsProcessor(), finder))\n\n# Ask questions\nmsg = airag(cfg, multi_index; question = "What are the best practices for parallel computing in Julia?")\npprint(msg) # prettify the answer

source


# PromptingTools.Experimental.RAGTools.NoEmbedderType.
julia
NoEmbedder <: AbstractEmbedder

No-op embedder for get_embeddings functions. It returns nothing.

source


# PromptingTools.Experimental.RAGTools.NoPostprocessorType.
julia
NoPostprocessor <: AbstractPostprocessor

Default method for postprocess! method. A passthrough option that returns the result without any changes.

Overload this method to add custom postprocessing steps, eg, logging, saving conversations to disk, etc.

source


# PromptingTools.Experimental.RAGTools.NoProcessorType.
julia
NoProcessor <: AbstractProcessor

No-op processor for get_keywords functions. It returns the inputs as is.

source


# PromptingTools.Experimental.RAGTools.NoRefinerType.
julia
NoRefiner <: AbstractRefiner

Default method for refine! method. A passthrough option that returns the result.answer without any changes.

source


# PromptingTools.Experimental.RAGTools.NoRephraserType.
julia
NoRephraser <: AbstractRephraser

No-op implementation for rephrase, which simply passes the question through.

source


# PromptingTools.Experimental.RAGTools.NoRerankerType.
julia
NoReranker <: AbstractReranker

No-op implementation for rerank, which simply passes the candidate chunks through.

source


# PromptingTools.Experimental.RAGTools.NoTagFilterType.
julia
NoTagFilter <: AbstractTagFilter

No-op implementation for find_tags, which simply returns all chunks.

source


# PromptingTools.Experimental.RAGTools.NoTaggerType.
julia
NoTagger <: AbstractTagger

No-op tagger for get_tags functions. It returns (nothing, nothing).

source


# PromptingTools.Experimental.RAGTools.OpenTaggerType.
julia
OpenTagger <: AbstractTagger

Tagger for get_tags functions, which generates possible tags for each chunk via aiextract. You can customize it via prompt template (default: :RAGExtractMetadataShort), but it's quite open-ended (ie, AI decides the possible tags).

source


# PromptingTools.Experimental.RAGTools.PassthroughTaggerType.
julia
PassthroughTagger <: AbstractTagger

Tagger for get_tags functions, which passes tags directly as Vector of Vectors of strings (ie, tags[i] is the tags for docs[i]).
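
A hedged usage sketch; passing the pre-made tags via tagger_kwargs is an assumption based on the build_index keyword structure shown later in this reference:

julia
# Hedged sketch: tags are supplied by the caller, not generated by AI\ndocs = ["Julia is fast.", "Python is popular."]\ntags = [["julia", "performance"], ["python"]] # tags[i] belongs to docs[i]\nindexer = SimpleIndexer(; tagger = PassthroughTagger())\nindex = build_index(indexer, docs; tagger_kwargs = (; tags))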

source


# PromptingTools.Experimental.RAGTools.RAGConfigType.
julia
RAGConfig <: AbstractRAGConfig

Default configuration for RAG. It uses SimpleIndexer, SimpleRetriever, and SimpleGenerator as default components. Provided as the first argument in airag.

To customize the components, replace corresponding fields for each step of the RAG pipeline (eg, use subtypes(AbstractIndexBuilder) to find the available options).

source


# PromptingTools.Experimental.RAGTools.RAGResultType.
julia
RAGResult

A struct for debugging RAG answers. It contains the question, answer, context, and the candidate chunks at each step of the RAG pipeline.

Think of the flow as question -> rephrased_questions -> answer -> final_answer with the context and candidate chunks helping along the way.

Fields

See also: pprint (pretty printing), annotate_support (for annotating the answer)

source


# PromptingTools.Experimental.RAGTools.RankGPTRerankerType.
julia
RankGPTReranker <: AbstractReranker

Rerank strategy using the RankGPT algorithm (calling LLMs). A method for rerank.

Reference

[1] Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agents by W. Sun et al. [2] RankGPT Github

source


# PromptingTools.Experimental.RAGTools.RankGPTResultType.
julia
RankGPTResult

Results from the RankGPT algorithm.

Fields

source


# PromptingTools.Experimental.RAGTools.SimpleAnswererType.
julia
SimpleAnswerer <: AbstractAnswerer

Default method for answer! method. Generates an answer using the aigenerate function with the provided context and question.

source


# PromptingTools.Experimental.RAGTools.SimpleBM25RetrieverType.
julia
SimpleBM25Retriever <: AbstractRetriever

Keyword-based implementation for retrieve. It does a simple similarity search via BM25Similarity and returns the results.

Make sure to use consistent processor and tagger with the Preparation Stage (build_index)!

Fields

source


# PromptingTools.Experimental.RAGTools.SimpleGeneratorType.
julia
SimpleGenerator <: AbstractGenerator

Default implementation for generate. It simply enumerates context snippets and runs aigenerate (no refinement).

It uses ContextEnumerator, SimpleAnswerer, NoRefiner, and NoPostprocessor as default contexter, answerer, refiner, and postprocessor.

source


# PromptingTools.Experimental.RAGTools.SimpleIndexerType.
julia
SimpleIndexer <: AbstractIndexBuilder

Default implementation for build_index.

It uses TextChunker, BatchEmbedder, and NoTagger as default chunker, embedder, and tagger.

source


# PromptingTools.Experimental.RAGTools.SimpleRefinerType.
julia
SimpleRefiner <: AbstractRefiner

Refines the answer using the same context previously provided via the provided prompt template. A method for refine!.

source


# PromptingTools.Experimental.RAGTools.SimpleRephraserType.
julia
SimpleRephraser <: AbstractRephraser

Rephraser implemented using the provided AI Template (eg, ...) and standard chat model. A method for rephrase.

source


# PromptingTools.Experimental.RAGTools.SimpleRetrieverType.
julia
SimpleRetriever <: AbstractRetriever

Default implementation for retrieve function. It does a simple similarity search via CosineSimilarity and returns the results.

Make sure to use consistent embedder and tagger with the Preparation Stage (build_index)!

Fields

source


# PromptingTools.Experimental.RAGTools.StylerType.
julia
Styler

Defines styling keywords for printstyled for each AbstractAnnotatedNode

source


# PromptingTools.Experimental.RAGTools.SubChunkIndexType.
julia
SubChunkIndex

A view of the parent index with respect to the chunks (and chunk-aligned fields). All methods and accessors working for AbstractChunkIndex also work for SubChunkIndex. It does not yet work for MultiIndex.

Fields

Example

julia
cc = CandidateChunks(index.id, 1:10)\nsub_index = @view(index[cc])

You can use SubChunkIndex to access chunks or sources (and other fields) from a parent index, eg,

julia
RT.chunks(sub_index)\nRT.sources(sub_index)\nRT.chunkdata(sub_index) # slice of embeddings\nRT.embeddings(sub_index) # slice of embeddings\nRT.tags(sub_index) # slice of tags\nRT.tags_vocab(sub_index) # unchanged, identical to parent version\nRT.extras(sub_index) # slice of extras

Access the parent index that the positions correspond to

julia
parent(sub_index)\nRT.positions(sub_index)

source


# PromptingTools.Experimental.RAGTools.SubDocumentTermMatrixType.

A partial view of a DocumentTermMatrix, tf is MATERIALIZED for performance and fewer allocations.

source


# PromptingTools.Experimental.RAGTools.TavilySearchRefinerType.
julia
TavilySearchRefiner <: AbstractRefiner

Refines the answer by executing a web search using the Tavily API. This method aims to enhance the answer's accuracy and relevance by incorporating information retrieved from the web. A method for refine!.

source


# PromptingTools.Experimental.RAGTools.TextChunkerType.
julia
TextChunker <: AbstractChunker

Chunker when you provide text to get_chunks functions. Inputs are directly chunked

source


# PromptingTools.Experimental.RAGTools.TrigramAnnotaterType.
julia
TrigramAnnotater

Annotation method where we score answer versus each context based on word-level trigrams that match.

It's a very simple method (and it can lose some semantic meaning in longer sequences, eg, negation), but it works reasonably well for both text and code.

source


# PromptingTools.Experimental.RAGTools._normalizeFunction.

Shortcut to LinearAlgebra.normalize. Provided in the package extension RAGToolsExperimentalExt (Requires SparseArrays, Unicode, and LinearAlgebra)

source


# PromptingTools.Experimental.RAGTools.add_node_metadata!Method.
julia
add_node_metadata!(annotater::TrigramAnnotater,\n    root::AnnotatedNode; add_sources::Bool = true, add_scores::Bool = true,\n    sources::Union{Nothing, AbstractVector{<:AbstractString}} = nothing)

Adds metadata to the children of root. Metadata includes sources and scores, if requested.

Optionally, it can add a list of sources at the end of the printed text.

The metadata is added by inserting new nodes into the root's children list (these new nodes have no children of their own, so they are the ones printed out).

source


# PromptingTools.Experimental.RAGTools.airagMethod.
julia
airag(cfg::AbstractRAGConfig, index::AbstractDocumentIndex;\n    question::AbstractString,\n    verbose::Integer = 1, return_all::Bool = false,\n    api_kwargs::NamedTuple = NamedTuple(),\n    retriever::AbstractRetriever = cfg.retriever,\n    retriever_kwargs::NamedTuple = NamedTuple(),\n    generator::AbstractGenerator = cfg.generator,\n    generator_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

High-level wrapper for Retrieval-Augmented Generation (RAG). It combines the retrieve and generate! steps, which you can customize if needed.

The simplest version first finds the relevant chunks in index for the question and then sends these chunks to the AI model to help with generating a response to the question.

To customize the components, replace the types (retriever, generator) of the corresponding step of the RAG pipeline - or go into sub-routines within the steps. Eg, use subtypes(AbstractRetriever) to find the available options.

Arguments

Returns

See also build_index, retrieve, generate!, RAGResult, getpropertynested, setpropertynested, merge_kwargs_nested, ChunkKeywordsIndex.

Examples

Using airag to get a response for a question:

julia
index = build_index(...)  # create an index\nquestion = "How to make a barplot in Makie.jl?"\nmsg = airag(index; question)

To understand the details of the RAG process, use return_all=true

julia
msg, details = airag(index; question, return_all = true)\n# details is a RAGDetails object with all the internal steps of the `airag` function

You can also pretty-print details to highlight generated text vs text that is supported by context. It also includes annotations of which context was used for each part of the response (where available).

julia
PT.pprint(details)

Example with advanced retrieval (question rephrasing and reranking; the latter requires COHERE_API_KEY). We will obtain the top 100 chunks from embeddings (top_k) and the top 5 chunks from reranking (top_n). In addition, it will be done with a "custom" locally-hosted model.

julia
cfg = RAGConfig(; retriever = AdvancedRetriever())\n\n# kwargs will be big and nested, let's prepare them upfront\n# we specify "custom" model for each component that calls LLM\nkwargs = (\n    retriever_kwargs = (;\n        top_k = 100,\n        top_n = 5,\n        rephraser_kwargs = (;\n            model = "custom"),\n        embedder_kwargs = (;\n            model = "custom"),\n        tagger_kwargs = (;\n            model = "custom")),\n    generator_kwargs = (;\n        answerer_kwargs = (;\n            model = "custom"),\n        refiner_kwargs = (;\n            model = "custom")),\n    api_kwargs = (;\n        url = "http://localhost:8080"))\n\nresult = airag(cfg, index, question; kwargs...)

If you want to use hybrid retrieval (embeddings + BM25), you can easily create an additional index based on keywords and pass them both into a MultiIndex.

You need to provide an explicit config, so the pipeline knows how to handle each index in the search similarity phase (finder).

julia
index = # your existing index\n\n# create the multi-index with the keywords index\nindex_keywords = ChunkKeywordsIndex(index)\nmulti_index = MultiIndex([index, index_keywords])\n\n# define the similarity measures for the indices that you have (same order)\nfinder = RT.MultiFinder([RT.CosineSimilarity(), RT.BM25Similarity()])\ncfg = RAGConfig(; retriever=AdvancedRetriever(; processor=RT.KeywordsProcessor(), finder))\n\n# Run the pipeline with the new hybrid retrieval (return the `RAGResult` to see the details)\nresult = airag(cfg, multi_index; question, return_all=true)\n\n# Pretty-print the result\nPT.pprint(result)

For easier manipulation of nested kwargs, see utilities getpropertynested, setpropertynested, merge_kwargs_nested.

source


# PromptingTools.Experimental.RAGTools.align_node_styles!Method.
julia
align_node_styles!(annotater::TrigramAnnotater, nodes::AbstractVector{<:AnnotatedNode}; kwargs...)

Aligns the styles of the nodes based on the surrounding nodes ("fill-in-the-middle").

If the node has no score, but the surrounding nodes have the same style, the node will inherit the style of the surrounding nodes.

source


# PromptingTools.Experimental.RAGTools.annotate_supportMethod.
julia
annotate_support(annotater::TrigramAnnotater, answer::AbstractString,\n    context::AbstractVector; min_score::Float64 = 0.5,\n    skip_trigrams::Bool = true, hashed::Bool = true,\n    sources::Union{Nothing, AbstractVector{<:AbstractString}} = nothing,\n    min_source_score::Float64 = 0.25,\n    add_sources::Bool = true,\n    add_scores::Bool = true, kwargs...)

Annotates the answer with its overlap with the context (ie, what's supported) and returns the annotated tree of nodes representing the answer.

Returns a "root" node with children nodes representing the sentences/code blocks in the answer. Only the "leaf" nodes are to be printed (to avoid duplication), "leaf" nodes are those with NO children.

Default logic:

Arguments

Example

julia
annotater = TrigramAnnotater()\ncontext = [\n    "This is a test context.", "Another context sentence.", "Final piece of context."]\nanswer = "This is a test context. Another context sentence."\n\nannotated_root = annotate_support(annotater, answer, context)\npprint(annotated_root) # pretty print the annotated tree

source


# PromptingTools.Experimental.RAGTools.annotate_supportMethod.
julia
annotate_support(\n    annotater::TrigramAnnotater, result::AbstractRAGResult; min_score::Float64 = 0.5,\n    skip_trigrams::Bool = true, hashed::Bool = true,\n    min_source_score::Float64 = 0.25,\n    add_sources::Bool = true,\n    add_scores::Bool = true, kwargs...)

Dispatch for annotate_support for AbstractRAGResult type. It extracts the final_answer and context from the result and calls annotate_support with them.

See annotate_support for more details.

Example

julia
res = RAGResult(; question = "", final_answer = "This is a test.",\n    context = ["Test context.", "Completely different"])\nannotated_root = annotate_support(annotater, res)\nPT.pprint(annotated_root)

source


# PromptingTools.Experimental.RAGTools.answer!Method.
julia
answer!(\n    answerer::SimpleAnswerer, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    model::AbstractString = PT.MODEL_CHAT, verbose::Bool = true,\n    template::Symbol = :RAGAnswerFromContext,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Generates an answer using the aigenerate function with the provided result.context and result.question.

Returns

Arguments

source


# PromptingTools.Experimental.RAGTools.build_contextMethod.
julia
build_context(contexter::ContextEnumerator,\n    index::AbstractDocumentIndex, candidates::AbstractCandidateChunks;\n    verbose::Bool = true,\n    chunks_window_margin::Tuple{Int, Int} = (1, 1), kwargs...)\n\n    build_context!(contexter::ContextEnumerator,\n    index::AbstractDocumentIndex, result::AbstractRAGResult; kwargs...)

Build context strings for each position in candidates considering a window margin around each position. If mutating version is used (build_context!), it will use result.reranked_candidates to update the result.context field.

Arguments

Returns

Examples

julia
index = ChunkIndex(...)  # Assuming a proper index is defined\ncandidates = CandidateChunks(index.id, [2, 4], [0.1, 0.2])\ncontext = build_context(ContextEnumerator(), index, candidates; chunks_window_margin=(0, 1)) # include only one following chunk for each matching chunk

source


# PromptingTools.Experimental.RAGTools.build_indexMethod.
julia
build_index(\n    indexer::KeywordsIndexer, files_or_docs::Vector{<:AbstractString};\n    verbose::Integer = 1,\n    extras::Union{Nothing, AbstractVector} = nothing,\n    index_id = gensym("ChunkKeywordsIndex"),\n    chunker::AbstractChunker = indexer.chunker,\n    chunker_kwargs::NamedTuple = NamedTuple(),\n    processor::AbstractProcessor = indexer.processor,\n    processor_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = indexer.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    api_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Builds a ChunkKeywordsIndex from the provided files or documents to support keyword-based search (BM25).

source


# PromptingTools.Experimental.RAGTools.build_indexMethod.
julia
build_index(\n    indexer::AbstractIndexBuilder, files_or_docs::Vector{<:AbstractString};\n    verbose::Integer = 1,\n    extras::Union{Nothing, AbstractVector} = nothing,\n    index_id = gensym("ChunkEmbeddingsIndex"),\n    chunker::AbstractChunker = indexer.chunker,\n    chunker_kwargs::NamedTuple = NamedTuple(),\n    embedder::AbstractEmbedder = indexer.embedder,\n    embedder_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = indexer.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    api_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Build an INDEX for RAG (Retrieval-Augmented Generation) applications from the provided file paths. INDEX is an object storing the document chunks and their embeddings (and potentially other information).

The function processes each file or document (depending on chunker), splits its content into chunks, embeds these chunks, optionally extracts metadata, and then combines this information into a retrievable index.

Define your own methods via indexer and its subcomponents (chunker, embedder, tagger).

Arguments

Returns

See also: ChunkEmbeddingsIndex, get_chunks, get_embeddings, get_tags, CandidateChunks, find_closest, find_tags, rerank, retrieve, generate!, airag

Examples

julia
# Default is loading a vector of strings and chunking them (`TextChunker()`)\nindex = build_index(SimpleIndexer(), texts; chunker_kwargs = (; max_length=10))\n\n# Another example with tags extraction, splitting only sentences and verbose output\n# Assuming `test_files` is a vector of file paths\nindexer = SimpleIndexer(chunker=FileChunker(), tagger=OpenTagger())\nindex = build_index(indexer, test_files; \n        chunker_kwargs = (; separators=[". "]), verbose=true)

Notes

source


# PromptingTools.Experimental.RAGTools.build_qa_evalsMethod.
julia
build_qa_evals(doc_chunks::Vector{<:AbstractString}, sources::Vector{<:AbstractString};\n               model=PT.MODEL_CHAT, instructions="None.", qa_template::Symbol=:RAGCreateQAFromContext, \n               verbose::Bool=true, api_kwargs::NamedTuple = NamedTuple(), kwargs...) -> Vector{QAEvalItem}

Create a collection of question and answer evaluations (QAEvalItem) from document chunks and sources. This function generates Q&A pairs based on the provided document chunks, using a specified AI model and template.

Arguments

Returns

Vector{QAEvalItem}: A vector of QAEvalItem structs, each containing a source, context, question, and answer. Invalid or empty items are filtered out.

Notes

Examples

Creating Q&A evaluations from a set of document chunks:

julia
doc_chunks = ["Text from document 1", "Text from document 2"]\nsources = ["source1", "source2"]\nqa_evals = build_qa_evals(doc_chunks, sources)

source


# PromptingTools.Experimental.RAGTools.build_tagsFunction.

Builds a matrix of tags and a vocabulary list. REQUIRES SparseArrays, LinearAlgebra, Unicode packages to be loaded!!

source


# PromptingTools.Experimental.RAGTools.build_tagsMethod.
julia
build_tags(tagger::AbstractTagger, chunk_tags::Nothing; kwargs...)

No-op that skips any tag building, returning nothing, nothing

Otherwise, it would build the sparse matrix and the vocabulary (requires SparseArrays and LinearAlgebra packages to be loaded).

source


# PromptingTools.Experimental.RAGTools.chunkdataMethod.

Access chunkdata for a subset of chunks; chunk_idx is a vector of chunk indices in the index.
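
Example

A hypothetical sketch (assumes an existing index; the positions are illustrative):

julia
chunkdata(index, [1, 3, 5]) # underlying data (eg, embeddings) for chunks 1, 3 and 5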

source


# PromptingTools.Experimental.RAGTools.chunkdataMethod.

Access chunkdata for a subset of chunks; chunk_idx is a vector of chunk indices in the index.

source


# PromptingTools.Experimental.RAGTools.chunkdataMethod.

Access chunkdata for a subset of chunks; chunk_idx is a vector of chunk indices in the index.

source


# PromptingTools.Experimental.RAGTools.cohere_apiMethod.
julia
cohere_api(;\napi_key::AbstractString,\nendpoint::String,\nurl::AbstractString="https://api.cohere.ai/v1",\nhttp_kwargs::NamedTuple=NamedTuple(),\nkwargs...)

Lightweight wrapper around the Cohere API. See https://cohere.com/docs for more details.

Arguments
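
Example

A hypothetical sketch of calling the rerank endpoint directly; the request body fields (query, documents, top_n, model) follow the public Cohere API and are assumptions here, not part of this docstring:

julia
response = cohere_api(; api_key = ENV["COHERE_API_KEY"], endpoint = "rerank",\n    model = "rerank-english-v3.0", query = "What is RAG?",\n    documents = ["Doc A text", "Doc B text"], top_n = 1)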

source


# PromptingTools.Experimental.RAGTools.create_permutation_instructionMethod.
julia
create_permutation_instruction(\n    context::AbstractVector{<:AbstractString}; rank_start::Integer = 1,\n    rank_end::Integer = 100, max_length::Integer = 512, template::Symbol = :RAGRankGPT)

Creates rendered template with injected context passages.

source


# PromptingTools.Experimental.RAGTools.extract_rankingMethod.
julia
extract_ranking(str::AbstractString)

Extracts the ranking from the response into a sorted array of integers.

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::AbstractSimilarityFinder, index::AbstractChunkIndex,\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, kwargs...)

Finds the indices of chunks (represented by embeddings in index) that are closest to query embedding (query_emb).

Returns only top_k closest indices.
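
Example

A minimal sketch (assumes an index built with embeddings and a pre-computed query embedding query_emb):

julia
candidates = find_closest(CosineSimilarity(), index, query_emb; top_k = 10)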

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::BitPackedCosineSimilarity, emb::AbstractMatrix{<:Bool},\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, rescore_multiplier::Int = 4, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by embeddings in emb) that are closest to query embedding (query_emb) using bit-packed binary embeddings (in the index).

This is a two-pass approach:

Returns only top_k closest indices.

Reference: HuggingFace: Embedding Quantization.

Examples

Convert any Float embeddings to bit-packed binary like this:

julia
bitpacked_emb = pack_bits(emb.>0)

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::BM25Similarity, dtm::AbstractDocumentTermMatrix,\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by DocumentTermMatrix in dtm) that are closest to query tokens (query_tokens) using BM25.

Reference: Wikipedia: BM25. Implementation follows: The Next Generation of Lucene Relevance.

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::CosineSimilarity, emb::AbstractMatrix{<:Real},\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by embeddings in emb) that are closest (in cosine similarity for CosineSimilarity()) to query embedding (query_emb).

finder is the logic used for the similarity search. Default is CosineSimilarity.

If minimum_similarity is provided, only indices with similarity greater than or equal to it are returned. Similarity can be between -1 and 1 (-1 = completely opposite, 1 = exactly the same).

Returns only top_k closest indices.
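
Example

A minimal sketch with random embeddings (one column per chunk embedding):

julia
emb = rand(Float32, 128, 100) # 100 chunk embeddings of dimension 128\nquery_emb = rand(Float32, 128)\nout = find_closest(CosineSimilarity(), emb, query_emb; top_k = 5, minimum_similarity = 0.25)\n# the 5 closest positions (and their similarity scores)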

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::BinaryCosineSimilarity, emb::AbstractMatrix{<:Bool},\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, rescore_multiplier::Int = 4, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by embeddings in emb) that are closest to query embedding (query_emb) using binary embeddings (in the index).

This is a two-pass approach:

Returns only top_k closest indices.

Reference: HuggingFace: Embedding Quantization.

Examples

Convert any Float embeddings to binary like this:

julia
binary_emb = map(>(0), emb)

source


# PromptingTools.Experimental.RAGTools.find_tagsMethod.
julia
find_tags(method::AnyTagFilter, index::AbstractChunkIndex,\n    tag::Union{AbstractString, Regex}; kwargs...)\n\nfind_tags(method::AnyTagFilter, index::AbstractChunkIndex,\n    tags::Vector{T}; kwargs...) where {T <: Union{AbstractString, Regex}}

Finds the indices of chunks (represented by tags in index) that have ANY OF the specified tag or tags.
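
Example

A minimal sketch (assumes the index was built with a tagger, so it contains tags):

julia
candidates = find_tags(AnyTagFilter(), index, ["julia", "programming"])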

source


# PromptingTools.Experimental.RAGTools.find_tagsMethod.
julia
find_tags(method::AllTagFilter, index::AbstractChunkIndex,\n    tag::Union{AbstractString, Regex}; kwargs...)\n\nfind_tags(method::AllTagFilter, index::AbstractChunkIndex,\n    tags::Vector{T}; kwargs...) where {T <: Union{AbstractString, Regex}}

Finds the indices of chunks (represented by tags in index) that have ALL OF the specified tag or tags.

source


# PromptingTools.Experimental.RAGTools.find_tagsMethod.
julia
find_tags(method::NoTagFilter, index::AbstractChunkIndex,\n    tags::Union{T, AbstractVector{<:T}};\n    kwargs...) where {T <: Union{AbstractString, Regex, Nothing}}

Returns all chunks in the index, ie, no filtering, so we simply return nothing (easier for dispatch).

source


# PromptingTools.Experimental.RAGTools.generate!Method.
julia
generate!(\n    generator::AbstractGenerator, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    verbose::Integer = 1,\n    api_kwargs::NamedTuple = NamedTuple(),\n    contexter::AbstractContextBuilder = generator.contexter,\n    contexter_kwargs::NamedTuple = NamedTuple(),\n    answerer::AbstractAnswerer = generator.answerer,\n    answerer_kwargs::NamedTuple = NamedTuple(),\n    refiner::AbstractRefiner = generator.refiner,\n    refiner_kwargs::NamedTuple = NamedTuple(),\n    postprocessor::AbstractPostprocessor = generator.postprocessor,\n    postprocessor_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Generate the response using the provided generator and the index and result. It is the second step in the RAG pipeline (after retrieve)

Returns the mutated result with the result.final_answer and the full conversation saved in result.conversations[:final_answer].

Notes

Arguments

See also: retrieve, build_context!, ContextEnumerator, answer!, SimpleAnswerer, refine!, NoRefiner, SimpleRefiner, postprocess!, NoPostprocessor

Examples

julia
# Assume we already have `index`\n\nquestion = "What are the best practices for parallel computing in Julia?"\n\n# Retrieve the relevant chunks - returns RAGResult\nresult = retrieve(index, question)\n\n# Generate the answer using the default generator, mutates the same result\nresult = generate!(index, result)

source


# PromptingTools.Experimental.RAGTools.get_chunksMethod.
julia
get_chunks(chunker::AbstractChunker,\n    files_or_docs::Vector{<:AbstractString};\n    sources::AbstractVector{<:AbstractString} = files_or_docs,\n    verbose::Bool = true,\n    separators = ["\\n\\n", ". ", "\\n", " "], max_length::Int = 256)

Chunks the provided files_or_docs into chunks of maximum length max_length (if possible with provided separators).

Supports two modes of operation:

Arguments
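
Example

A minimal sketch chunking in-memory strings with TextChunker (file paths with FileChunker work the same way); it is assumed here that the call returns the chunks together with their sources:

julia
texts = ["Some longer text that will be split into chunks.", "Another document."]\nchunks, sources = get_chunks(TextChunker(), texts; max_length = 128)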

source


# PromptingTools.Experimental.RAGTools.get_embeddingsMethod.
julia
get_embeddings(embedder::BatchEmbedder, docs::AbstractVector{<:AbstractString};\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_EMBEDDING,\n    truncate_dimension::Union{Int, Nothing} = nothing,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    target_batch_size_length::Int = 80_000,\n    ntasks::Int = 4 * Threads.nthreads(),\n    kwargs...)

Embeds a vector of docs using the provided model (kwarg model) in a batched manner - BatchEmbedder.

BatchEmbedder tries to batch embedding calls for roughly 80K characters per call (to avoid exceeding the API rate limit) to reduce network latency.

Notes

Arguments
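
Example

A minimal sketch (requires a valid API key for the embedding model):

julia
docs = ["First document text.", "Second document text."]\nemb = get_embeddings(BatchEmbedder(), docs; model = PT.MODEL_EMBEDDING)\nsize(emb) # (embedding_dimension, 2) -- one column per document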

source


# PromptingTools.Experimental.RAGTools.get_embeddingsMethod.
julia
get_embeddings(embedder::BinaryBatchEmbedder, docs::AbstractVector{<:AbstractString};\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_EMBEDDING,\n    truncate_dimension::Union{Int, Nothing} = nothing,\n    return_type::Type = Matrix{Bool},\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    target_batch_size_length::Int = 80_000,\n    ntasks::Int = 4 * Threads.nthreads(),\n    kwargs...)

Embeds a vector of docs using the provided model (kwarg model) in a batched manner and then returns the binary embeddings matrix - BinaryBatchEmbedder.

BinaryBatchEmbedder tries to batch embedding calls for roughly 80K characters per call (to avoid exceeding the API rate limit) to reduce network latency.

Notes

Arguments

source


# PromptingTools.Experimental.RAGTools.get_embeddingsMethod.
julia
get_embeddings(embedder::BitPackedBatchEmbedder, docs::AbstractVector{<:AbstractString};\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_EMBEDDING,\n    truncate_dimension::Union{Int, Nothing} = nothing,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    target_batch_size_length::Int = 80_000,\n    ntasks::Int = 4 * Threads.nthreads(),\n    kwargs...)

Embeds a vector of docs using the provided model (kwarg model) in a batched manner and then returns the binary embeddings matrix represented in UInt64 (bit-packed) - BitPackedBatchEmbedder.

BitPackedBatchEmbedder tries to batch embedding calls for roughly 80K characters per call (to avoid exceeding the API rate limit) to reduce network latency.

The best option for FAST and MEMORY-EFFICIENT storage of embeddings, for retrieval use BitPackedCosineSimilarity.

Notes

Arguments

See also: unpack_bits, pack_bits, BitPackedCosineSimilarity.

source


# PromptingTools.Experimental.RAGTools.get_tagsMethod.
julia
get_tags(tagger::NoTagger, docs::AbstractVector{<:AbstractString};\n    kwargs...)

Simple no-op that skips any tagging of the documents

source


# PromptingTools.Experimental.RAGTools.get_tagsMethod.
julia
get_tags(tagger::OpenTagger, docs::AbstractVector{<:AbstractString};\n    verbose::Bool = true,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Extracts "tags" (metadata/keywords) from a vector of docs using the provided model (kwarg model).

Arguments

source


# PromptingTools.Experimental.RAGTools.get_tagsMethod.
julia
get_tags(tagger::PassthroughTagger, docs::AbstractVector{<:AbstractString};\n    tags::AbstractVector{<:AbstractVector{<:AbstractString}},\n    kwargs...)

Pass tags directly as Vector of Vectors of strings (ie, tags[i] is the tags for docs[i]). It then builds the vocabulary from the tags and returns both the tags in matrix form and the vocabulary.
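
Example

A minimal sketch (tags[i] corresponds to docs[i]):

julia
docs = ["Text about DataFrames", "Text about Plots"]\ntags = get_tags(PassthroughTagger(), docs; tags = [["dataframes"], ["plots"]])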

source


# PromptingTools.Experimental.RAGTools.getpropertynestedFunction.
julia
getpropertynested(\n    nt::NamedTuple, parent_keys::Vector{Symbol}, key::Symbol, default = nothing)

Get a property key from a nested NamedTuple nt, where the property is nested to a key in parent_keys.

Useful for nested kwargs where we want to get some property in parent_keys subset (eg, model in retriever_kwargs).

Examples

julia
kw = (; abc = (; def = "x"))\ngetpropertynested(kw, [:abc], :def)\n# Output: "x"

source


# PromptingTools.Experimental.RAGTools.hamming_distanceMethod.
julia
hamming_distance(\n    mat::AbstractMatrix{T}, query::AbstractVector{T})::Vector{Int} where {T <: Integer}

Calculates the column-wise Hamming distance between a matrix of binary vectors mat and a single binary vector query.

This is the first-pass ranking for BinaryCosineSimilarity method.

Implementation from domluna's tinyRAG.
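
Example

A minimal sketch with random binary embeddings (one column per embedding):

julia
mat = rand(Bool, 128, 10) # 10 binary embeddings of length 128\nquery = rand(Bool, 128)\ndists = hamming_distance(mat, query) # Vector{Int} of length 10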

source


# PromptingTools.Experimental.RAGTools.hcat_truncateMethod.
julia
hcat_truncate(matrices::AbstractVector{<:AbstractMatrix{T}},\n    truncate_dimension::Union{Nothing, Int} = nothing; verbose::Bool = false) where {T <:\n                                                                                     Real}

Horizontal concatenation of matrices, with optional truncation of the rows of each matrix to the specified dimension (reducing embedding dimensionality).

More efficient than simple splatting, as the resulting matrix is pre-allocated in one go.

Returns: a Matrix{Float32}

Arguments

Examples

julia
a = rand(Float32, 1000, 10)\nb = rand(Float32, 1000, 20)\n\nc = hcat_truncate([a, b])\nsize(c) # (1000, 30)\n\nd = hcat_truncate([a, b], 500)\nsize(d) # (500, 30)

source


# PromptingTools.Experimental.RAGTools.load_textMethod.
julia
load_text(chunker::AbstractChunker, input;\n    kwargs...)

Load text from input using the provided chunker. Called by get_chunks.

Available chunkers:

source


# PromptingTools.Experimental.RAGTools.merge_kwargs_nestedMethod.
julia
merge_kwargs_nested(nt1::NamedTuple, nt2::NamedTuple)

Merges two nested NamedTuples nt1 and nt2 recursively. The nt2 values will overwrite the nt1 values when overlapping.

Example

julia
kw = (; abc = (; def = "x"))\nkw2 = (; abc = (; def = "x", def2 = 2), new = 1)\nmerge_kwargs_nested(kw, kw2)

source


# PromptingTools.Experimental.RAGTools.pack_bitsMethod.
julia
pack_bits(arr::AbstractMatrix{<:Bool}) -> Matrix{UInt64}\npack_bits(vect::AbstractVector{<:Bool}) -> Vector{UInt64}

Pack a matrix or vector of boolean values into a more compact representation using UInt64.

Arguments (Input)

Returns

Examples

For vectors:

julia
bin = rand(Bool, 128)\nbinint = pack_bits(bin)\nbinx = unpack_bits(binint)\n@assert bin == binx

For matrices:

julia
bin = rand(Bool, 128, 10)\nbinint = pack_bits(bin)\nbinx = unpack_bits(binint)\n@assert bin == binx

source


# PromptingTools.Experimental.RAGTools.permutation_step!Method.
julia
permutation_step!(\n    result::RankGPTResult; rank_start::Integer = 1, rank_end::Integer = 100, kwargs...)

One sub-step of the RankGPT algorithm permutation ranking within the window of chunks defined by rank_start and rank_end positions.

source


# PromptingTools.Experimental.RAGTools.preprocess_tokensFunction.
julia
preprocess_tokens(text::AbstractString, stemmer=nothing; stopwords::Union{Nothing,Set{String}}=nothing, min_length::Int=3)

Preprocess provided text by removing numbers, punctuation, and applying stemming for BM25 search index.

Returns a list of preprocessed tokens.

Example

julia
using Snowball # provides the Stemmer used below\nstemmer = Snowball.Stemmer("english")\nstopwords = Set(["a", "an", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "no", "not", "of", "on", "or", "such", "some", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"])\ntext = "This is a sample paragraph to test the functionality of your text preprocessor. It contains a mix of uppercase and lowercase letters, as well as punctuation marks such as commas, periods, and exclamation points! Let's see how your preprocessor handles quotes, like \"this one\", and also apostrophes, like in don't. Will it preserve the formatting of this paragraph, including the indentation and line breaks?"\npreprocess_tokens(text, stemmer; stopwords)

source


# PromptingTools.Experimental.RAGTools.print_htmlMethod.
julia
print_html([io::IO,] parent_node::AbstractAnnotatedNode)\n\nprint_html([io::IO,] rag::AbstractRAGResult; add_sources::Bool = false,\n    add_scores::Bool = false, default_styler = HTMLStyler(),\n    low_styler = HTMLStyler(styles = "color:magenta", classes = ""),\n    medium_styler = HTMLStyler(styles = "color:blue", classes = ""),\n    high_styler = HTMLStyler(styles = "", classes = ""), styler_kwargs...)

Pretty-prints the annotation parent_node (or RAGResult) to the io stream (or returns the string) in HTML format (assumes node is styled with styler HTMLStyler).

It wraps each "token" into a span with requested styling (HTMLStyler's properties classes and styles). It also replaces new lines with <br> for better HTML formatting.

For any non-HTML styler, it prints the content as plain text.

Returns

See also HTMLStyler, annotate_support, and set_node_style! for how the styling is applied and what the arguments mean.

Examples

Note: RT is an alias for PromptingTools.Experimental.RAGTools

Simple start directly with the RAGResult:

julia
# set up the text/RAGResult\ncontext = [\n    "This is a test context.", "Another context sentence.", "Final piece of context."]\nanswer = "This is a test answer. It has multiple sentences."\nrag = RT.RAGResult(; context, final_answer=answer, question="")\n\n# print the HTML\nprint_html(rag)

Low-level control by creating our AnnotatedNode:

julia
# prepare your HTML styling\nstyler_kwargs = (;\n    default_styler=RT.HTMLStyler(),\n    low_styler=RT.HTMLStyler(styles="color:magenta", classes=""),\n    medium_styler=RT.HTMLStyler(styles="color:blue", classes=""),\n    high_styler=RT.HTMLStyler(styles="", classes=""))\n\n# annotate the text\ncontext = [\n    "This is a test context.", "Another context sentence.", "Final piece of context."]\nanswer = "This is a test answer. It has multiple sentences."\n\nparent_node = RT.annotate_support(\n    RT.TrigramAnnotater(), answer, context; add_sources=false, add_scores=false, styler_kwargs...)\n\n# print the HTML\nprint_html(parent_node)\n\n# or to accumulate more nodes\nio = IOBuffer()\nprint_html(io, parent_node)

source


# PromptingTools.Experimental.RAGTools.rank_gptMethod.
julia
rank_gpt(chunks::AbstractVector{<:AbstractString}, question::AbstractString;\n    verbose::Int = 1, rank_start::Integer = 1, rank_end::Integer = 100,\n    window_size::Integer = 20, step::Integer = 10,\n    num_rounds::Integer = 1, model::String = "gpt4o", kwargs...)

Ranks the chunks based on their relevance for question. Returns the ranking permutation of the chunks in the order they are most relevant to the question (the first is the most relevant).

Example

julia
result = rank_gpt(chunks, question; rank_start=1, rank_end=25, window_size=8, step=4, num_rounds=3, model="gpt4o")

Reference

[1] Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agents by W. Sun et al. [2] RankGPT Github

source


# PromptingTools.Experimental.RAGTools.rank_sliding_window!Method.
julia
rank_sliding_window!(\n    result::RankGPTResult; verbose::Int = 1, rank_start = 1, rank_end = 100,\n    window_size = 20, step = 10, model::String = "gpt4o", kwargs...)

One single pass of the RankGPT algorithm permutation ranking across all positions between rank_start and rank_end.

source


# PromptingTools.Experimental.RAGTools.receive_permutation!Method.
julia
receive_permutation!(\n    curr_rank::AbstractVector{<:Integer}, response::AbstractString;\n    rank_start::Integer = 1, rank_end::Integer = 100)

Extracts and heals the permutation to contain all ranking positions.

source


# PromptingTools.Experimental.RAGTools.reciprocal_rank_fusionMethod.
julia
reciprocal_rank_fusion(args...; k::Int=60)

Merges multiple rankings and calculates the reciprocal rank score for each chunk (discounted by the inverse of the rank).

Example

julia
positions1 = [1, 3, 5, 7, 9]\npositions2 = [2, 4, 6, 8, 10]\npositions3 = [2, 4, 6, 11, 12]\n\nmerged_positions, scores = reciprocal_rank_fusion(positions1, positions2, positions3)

source


# PromptingTools.Experimental.RAGTools.reciprocal_rank_fusionMethod.
julia
reciprocal_rank_fusion(\n    positions1::AbstractVector{<:Integer}, scores1::AbstractVector{<:T},\n    positions2::AbstractVector{<:Integer},\n    scores2::AbstractVector{<:T}; k::Int = 60) where {T <: Real}

Merges two sets of rankings and their joint scores. Calculates the reciprocal rank score for each chunk (discounted by the inverse of the rank).

Example

julia
positions1 = [1, 3, 5, 7, 9]\nscores1 = [0.9, 0.8, 0.7, 0.6, 0.5]\npositions2 = [2, 4, 6, 8, 10]\nscores2 = [0.5, 0.6, 0.7, 0.8, 0.9]\n\nmerged, scores = reciprocal_rank_fusion(positions1, scores1, positions2, scores2; k = 60)

source


# PromptingTools.Experimental.RAGTools.refine!Method.
julia
refine!(\n    refiner::NoRefiner, index::AbstractChunkIndex, result::AbstractRAGResult;\n    kwargs...)

Simple no-op function for refine!. It simply copies the result.answer and result.conversations[:answer] without any changes.

source


# PromptingTools.Experimental.RAGTools.refine!Method.
julia
refine!(\n    refiner::SimpleRefiner, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_CHAT,\n    template::Symbol = :RAGAnswerRefiner,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Give model a chance to refine the answer (using the same or different context than previously provided).

This method uses the same context as the original answer, however, it can be modified to do additional retrieval and use a different context.

Returns

Arguments
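
Example

A minimal sketch (assumes index and a result that already contains an answer, eg, from answer!):

julia
refine!(SimpleRefiner(), index, result; model = PT.MODEL_CHAT)\n# the refined answer is stored in the result (eg, result.final_answer)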

source


# PromptingTools.Experimental.RAGTools.refine!Method.
julia
refine!(\n    refiner::TavilySearchRefiner, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_CHAT,\n    include_answer::Bool = true,\n    max_results::Integer = 5,\n    include_domains::AbstractVector{<:AbstractString} = String[],\n    exclude_domains::AbstractVector{<:AbstractString} = String[],\n    template::Symbol = :RAGWebSearchRefiner,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Refines the answer by executing a web search using the Tavily API. This method aims to enhance the answer's accuracy and relevance by incorporating information retrieved from the web.

Note: The web results and web answer (if requested) will be added to the context and sources!

Returns

Arguments

Example

julia
refine!(TavilySearchRefiner(), index, result)\n# See result.final_answer or pprint(result)

To enable this refiner in a full RAG pipeline, simply swap the component in the config:

julia
cfg = RT.RAGConfig()\ncfg.generator.refiner = RT.TavilySearchRefiner()\n\nresult = airag(cfg, index; question, return_all = true)\npprint(result)

source


# PromptingTools.Experimental.RAGTools.rephraseMethod.
julia
rephrase(rephraser::SimpleRephraser, question::AbstractString;\n    verbose::Bool = true,\n    model::String = PT.MODEL_CHAT, template::Symbol = :RAGQueryHyDE,\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Rephrases the question using the provided rephraser (default template: :RAGQueryHyDE).

Special flavor of rephrasing using HyDE (Hypothetical Document Embedding) method, which aims to find the documents most similar to a synthetic passage that would be a good answer to our question.

Returns both the original and the rephrased question.

Arguments

source


# PromptingTools.Experimental.RAGTools.rephraseMethod.
julia
rephrase(rephraser::NoRephraser, question::AbstractString; kwargs...)

No-op, simple passthrough.

source


# PromptingTools.Experimental.RAGTools.rephraseMethod.
julia
rephrase(rephraser::SimpleRephraser, question::AbstractString;\n    verbose::Bool = true,\n    model::String = PT.MODEL_CHAT, template::Symbol = :RAGQueryOptimizer,\n    cost_tracker = Threads.Atomic{Float64}(0.0), kwargs...)

Rephrases the question using the provided rephraser template.

Returns both the original and the rephrased question.

Arguments
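
Example

A minimal sketch (uses the default chat model, so an API key is assumed):

julia
questions = rephrase(SimpleRephraser(), "How do I install Julia packages?")\n# returns a vector with the original and the rephrased question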

source


# PromptingTools.Experimental.RAGTools.rerankMethod.
julia
rerank(\n    reranker::CohereReranker, index::AbstractDocumentIndex, question::AbstractString,\n    candidates::AbstractCandidateChunks;\n    verbose::Bool = false,\n    api_key::AbstractString = PT.COHERE_API_KEY,\n    top_n::Integer = length(candidates.scores),\n    model::AbstractString = "rerank-english-v3.0",\n    return_documents::Bool = false,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Re-ranks a list of candidate chunks using the Cohere Rerank API. See https://cohere.com/rerank for more details.

Arguments
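
Example

A minimal sketch (requires COHERE_API_KEY; assumes index, question and candidates from find_closest):

julia
reranked = rerank(CohereReranker(), index, question, candidates; top_n = 5)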

source


# PromptingTools.Experimental.RAGTools.rerankMethod.
julia
rerank(\n    reranker::RankGPTReranker, index::AbstractDocumentIndex, question::AbstractString,\n    candidates::AbstractCandidateChunks;\n    api_key::AbstractString = PT.OPENAI_API_KEY,\n    model::AbstractString = PT.MODEL_CHAT,\n    verbose::Bool = false,\n    top_n::Integer = length(candidates.scores),\n    unique_chunks::Bool = true,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Re-ranks a list of candidate chunks using the RankGPT algorithm. See https://github.com/sunnweiwei/RankGPT for more details.

It uses LLM calls to rank the candidate chunks.

Arguments

Examples

julia
index = <some index>\nquestion = "What are the best practices for parallel computing in Julia?"\n\ncfg = RAGConfig(; retriever = SimpleRetriever(; reranker = RT.RankGPTReranker()))\nmsg = airag(cfg, index; question, return_all = true)

To get full verbosity of logs, set verbose = 5 (anything higher than 3).

julia
msg = airag(cfg, index; question, return_all = true, verbose = 5)

Reference

[1] Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agents by W. Sun et al. [2] RankGPT Github

source


# PromptingTools.Experimental.RAGTools.retrieveMethod.
julia
retrieve(retriever::AbstractRetriever,\n    index::AbstractChunkIndex,\n    question::AbstractString;\n    verbose::Integer = 1,\n    top_k::Integer = 100,\n    top_n::Integer = 5,\n    api_kwargs::NamedTuple = NamedTuple(),\n    rephraser::AbstractRephraser = retriever.rephraser,\n    rephraser_kwargs::NamedTuple = NamedTuple(),\n    embedder::AbstractEmbedder = retriever.embedder,\n    embedder_kwargs::NamedTuple = NamedTuple(),\n    processor::AbstractProcessor = retriever.processor,\n    processor_kwargs::NamedTuple = NamedTuple(),\n    finder::AbstractSimilarityFinder = retriever.finder,\n    finder_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = retriever.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    filter::AbstractTagFilter = retriever.filter,\n    filter_kwargs::NamedTuple = NamedTuple(),\n    reranker::AbstractReranker = retriever.reranker,\n    reranker_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Retrieves the most relevant chunks from the index for the given question and returns them in the RAGResult object.

This is the main entry point for the retrieval stage of the RAG pipeline. It is often followed by generate! step.

Notes:

The arguments correspond to the steps of the retrieval process (rephrasing, embedding, finding similar docs, tagging, filtering by tags, reranking). You can customize each step by providing a new custom type that dispatches the corresponding function, eg, create your own type struct MyReranker<:AbstractReranker end and define the custom method for it rerank(::MyReranker,...) = ....

Note: Discover available retrieval sub-types for each step with subtypes(AbstractRephraser) and similar for other abstract types.

If you're using locally-hosted models, you can pass the api_kwargs with the url field set to the model's URL and make sure to provide corresponding model kwargs to rephraser, embedder, and tagger to use the custom models (they make AI calls).

Arguments

See also: SimpleRetriever, AdvancedRetriever, build_index, rephrase, get_embeddings, get_keywords, find_closest, get_tags, find_tags, rerank, RAGResult.

Examples

Find the 5 most relevant chunks from the index for the given question.

julia
# assumes you have an existing index `index`\nretriever = SimpleRetriever()\n\nresult = retrieve(retriever,\n    index,\n    "What is the capital of France?",\n    top_n = 5)\n\n# or use the default retriever (same as above)\nresult = retrieve(retriever,\n    index,\n    "What is the capital of France?",\n    top_n = 5)

Apply more advanced retrieval with question rephrasing and reranking (requires COHERE_API_KEY). We will obtain top 100 chunks from embeddings (top_k) and top 5 chunks from reranking (top_n).

julia
retriever = AdvancedRetriever()\n\nresult = retrieve(retriever, index, question; top_k=100, top_n=5)

You can use the retriever to customize your retrieval strategy or directly change the strategy types in the retrieve kwargs!

Example of using locally-hosted model hosted on localhost:8080:

julia
retriever = SimpleRetriever()\nresult = retrieve(retriever, index, question;\n    rephraser_kwargs = (; model = "custom"),\n    embedder_kwargs = (; model = "custom"),\n    tagger_kwargs = (; model = "custom"), api_kwargs = (;\n        url = "http://localhost:8080"))

source


# PromptingTools.Experimental.RAGTools.run_qa_evalsMethod.
julia
run_qa_evals(index::AbstractChunkIndex, qa_items::AbstractVector{<:QAEvalItem};\n    api_kwargs::NamedTuple = NamedTuple(),\n    airag_kwargs::NamedTuple = NamedTuple(),\n    qa_evals_kwargs::NamedTuple = NamedTuple(),\n    verbose::Bool = true, parameters_dict::Dict{Symbol, <:Any} = Dict{Symbol, Any}())

Evaluates a vector of QAEvalItems and returns a vector QAEvalResult. This function assesses the relevance and accuracy of the answers generated in a QA evaluation context.

See ?run_qa_evals for more details.

Arguments

Returns

Vector{QAEvalResult}: Vector of evaluation results that includes various scores and metadata related to the QA evaluation.

Example

julia
index = "..." # Assuming a proper index is defined\nqa_items = [QAEvalItem(question="What is the capital of France?", answer="Paris", context="France is a country in Europe."),\n            QAEvalItem(question="What is the capital of Germany?", answer="Berlin", context="Germany is a country in Europe.")]\n\n# Let's run a test with `top_k=5`\nresults = run_qa_evals(index, qa_items; airag_kwargs=(;top_k=5), parameters_dict=Dict(:top_k => 5))\n\n# Filter out the "failed" calls\nresults = filter(x->!isnothing(x.answer_score), results);\n\n# See average judge score\nmean(x->x.answer_score, results)

source


# PromptingTools.Experimental.RAGTools.run_qa_evalsMethod.
julia
run_qa_evals(qa_item::QAEvalItem, ctx::RAGResult; verbose::Bool = true,\n             parameters_dict::Dict{Symbol, <:Any}, judge_template::Symbol = :RAGJudgeAnswerFromContext,\n             model_judge::AbstractString, api_kwargs::NamedTuple = NamedTuple()) -> QAEvalResult

Evaluates a single QAEvalItem using RAG details (RAGResult) and returns a QAEvalResult structure. This function assesses the relevance and accuracy of the answers generated in a QA evaluation context.

Arguments

Returns

QAEvalResult: An evaluation result that includes various scores and metadata related to the QA evaluation.

Notes

Examples

Evaluating a QA pair using a specific context and model:

julia
qa_item = QAEvalItem(question="What is the capital of France?", answer="Paris", context="France is a country in Europe.")\nctx = RAGResult(; question="What is the capital of France?",\n    context=["France is a country in Europe."], final_answer="Paris")\nparameters_dict = Dict(:param1 => "value1", :param2 => "value2")\n\neval_result = run_qa_evals(qa_item, ctx, parameters_dict=parameters_dict, model_judge="MyAIJudgeModel")

source


# PromptingTools.Experimental.RAGTools.score_retrieval_hitMethod.

Returns 1.0 if context overlaps or is contained within any of the candidate_context

source


# PromptingTools.Experimental.RAGTools.score_retrieval_rankMethod.

Returns Integer rank of the position where context overlaps or is contained within a candidate_context

source


# PromptingTools.Experimental.RAGTools.score_to_unit_scaleMethod.
julia
score_to_unit_scale(x::AbstractVector{T}) where T<:Real

Shift and scale a vector of scores to the unit scale [0, 1].

Example

julia
x = [1.0, 2.0, 3.0, 4.0, 5.0]\nscaled_x = score_to_unit_scale(x)

source


# PromptingTools.Experimental.RAGTools.set_node_style!Method.
julia
set_node_style!(::TrigramAnnotater, node::AnnotatedNode;\n    low_threshold::Float64 = 0.0, medium_threshold::Float64 = 0.5, high_threshold::Float64 = 1.0,\n    default_styler::AbstractAnnotationStyler = Styler(),\n    low_styler::AbstractAnnotationStyler = Styler(color = :magenta, bold = false),\n    medium_styler::AbstractAnnotationStyler = Styler(color = :blue, bold = false),\n    high_styler::AbstractAnnotationStyler = Styler(color = :nothing, bold = false),\n    bold_multihits::Bool = false)

Sets style of node based on the provided rules

source


# PromptingTools.Experimental.RAGTools.setpropertynestedMethod.
julia
setpropertynested(nt::NamedTuple, parent_keys::Vector{Symbol},\n    key::Symbol, value)

Setter for a property key in a nested NamedTuple nt, where the property is nested to a key in parent_keys.

Useful for nested kwargs where we want to change some property in parent_keys subset (eg, model in retriever_kwargs).

Examples

julia
kw = (; abc = (; def = "x"))\nsetpropertynested(kw, [:abc], :def, "y")\n# Output: (abc = (def = "y",),)

Practical example of changing all model keys in CHAT-based steps in the pipeline:

julia
# changes :model to "gpt4t" whenever the parent key is in the below list (chat-based steps)\nsetpropertynested(kwargs,\n    [:rephraser_kwargs, :tagger_kwargs, :answerer_kwargs, :refiner_kwargs],\n    :model, "gpt4t")

Or changing an embedding model (across both indexer and retriever steps, because it's same step name):

julia
kwargs = setpropertynested(\n        kwargs, [:embedder_kwargs],\n        :model, "text-embedding-3-large"\n    )

source


# PromptingTools.Experimental.RAGTools.split_into_code_and_sentencesMethod.
julia
split_into_code_and_sentences(input::Union{String, SubString{String}})

Splits text block into code or text and sub-splits into units.

If it is a code block, it splits by newline but keeps the group_id the same (to have the same source). If it is a text block, it splits into sentences, bullets, etc., and provides a different group_id for each (to have different sources).

source


# PromptingTools.Experimental.RAGTools.tags_extractMethod.
julia
tags_extract(item::Tag)\ntags_extract(tags::Vector{Tag})

Extracts the Tag item into a string of the form category:::value (lowercased and spaces replaced with underscores).

Example

julia
msg = aiextract(:RAGExtractMetadataShort; return_type=MaybeTags, text="I like package DataFrames", instructions="None.")\nmetadata = tags_extract(msg.content.items)

source


# PromptingTools.Experimental.RAGTools.token_with_boundariesMethod.
julia
token_with_boundaries(\n    prev_token::Union{Nothing, AbstractString}, curr_token::AbstractString,\n    next_token::Union{Nothing, AbstractString})

Joins the three tokens together. Useful to add boundary tokens (like spaces vs brackets) to the curr_token to improve the matched context (ie, separate partial matches from exact match)

source


# PromptingTools.Experimental.RAGTools.tokenizeMethod.
julia
tokenize(input::Union{String, SubString{String}})

Tokenizes provided input by spaces, special characters or Julia symbols (eg, =>).

Unlike other tokenizers, it aims to be lossless, ie, to keep both the separated text and the separators.

source


# PromptingTools.Experimental.RAGTools.translate_positions_to_parentMethod.
julia
translate_positions_to_parent(index::AbstractChunkIndex, positions::AbstractVector{<:Integer})

Translate positions to the parent index. Useful to convert between positions in a view and the original index.

Used whenever chunkdata() is used to re-align positions in case the index is a view.

source


# PromptingTools.Experimental.RAGTools.translate_positions_to_parentMethod.
julia
translate_positions_to_parent(\n    index::SubChunkIndex, pos::AbstractVector{<:Integer})

Translate positions to the parent index. Useful to convert between positions in a view and the original index.

Used whenever chunkdata() or tags() are used to re-align positions to the "parent" index.

source


# PromptingTools.Experimental.RAGTools.trigram_support!Method.
julia
trigram_support!(parent_node::AnnotatedNode,\n    context_trigrams::AbstractVector, trigram_func::F1 = trigrams, token_transform::F2 = identity;\n    skip_trigrams::Bool = false, min_score::Float64 = 0.5,\n    min_source_score::Float64 = 0.25,\n    stop_words::AbstractVector{<:String} = STOPWORDS,\n    styler_kwargs...) where {F1 <: Function, F2 <: Function}

Find if the parent_node.content is supported by the provided context_trigrams.

Logic:

For diagnostics, you can use AbstractTrees.print_tree(parent_node) to see the tree structure of each token and its score.

Example

julia
node = AnnotatedNode(content = "xyz")\ntrigram_support!(node, context_trigrams) # updates node.children!

source


# PromptingTools.Experimental.RAGTools.trigramsMethod.
julia
trigrams(input_string::AbstractString; add_word::AbstractString = "")

Splits provided input_string into a vector of trigrams (combination of three consecutive characters found in the input_string).

If add_word is provided, it is added to the resulting array. Useful to add the full word itself to the resulting array for exact match.

source


# PromptingTools.Experimental.RAGTools.trigrams_hashedMethod.
julia
trigrams_hashed(input_string::AbstractString; add_word::AbstractString = "")

Splits provided input_string into a Set of hashed trigrams (combination of three consecutive characters found in the input_string).

It is more efficient for lookups in large strings (eg, >100K characters).

If add_word is provided, it is added to the resulting array to hash. Useful to add the full word itself to the resulting array for exact match.

source


# PromptingTools.last_messageMethod.
julia
PT.last_message(result::RAGResult)

Extract the last message from the RAGResult. It looks for final_answer first, then answer fields in the conversations dictionary. Returns nothing if not found.

source


# PromptingTools.last_outputMethod.

Extracts the last output (generated text answer) from the RAGResult.

source


# PromptingTools.pprintMethod.
julia
PromptingTools.pprint(\n    io::IO, node::AbstractAnnotatedNode;\n    text_width::Int = displaysize(io)[2], add_newline::Bool = true)

Pretty print the node to the io stream, including all its children

Supports only node.style::Styler for now.

source


# PromptingTools.pprintMethod.
julia
PT.pprint(\n    io::IO, r::AbstractRAGResult; add_context::Bool = false,\n    text_width::Int = displaysize(io)[2], annotater_kwargs...)

Pretty print the RAG result r to the given io stream.

If add_context is true, the context will be printed as well. The text_width parameter can be used to control the width of the output.

You can provide additional keyword arguments to the annotater, eg, add_sources, add_scores, min_score, etc. See annotate_support for more details.

source



Reference for RAGTools

# PromptingTools.Experimental.RAGToolsModule.
julia
RAGTools

Provides Retrieval-Augmented Generation (RAG) functionality.

Requires: LinearAlgebra, SparseArrays, Unicode, PromptingTools for proper functionality.

This module is experimental and may change at any time. It is intended to be moved to a separate package in the future.

source


# PromptingTools.Experimental.RAGTools.AbstractCandidateChunksType.
julia
AbstractCandidateChunks

Abstract type for storing candidate chunks, ie, references to items in a AbstractChunkIndex.

Return type from find_closest and find_tags functions.

Required Fields

source


# PromptingTools.Experimental.RAGTools.AbstractChunkIndexType.
julia
AbstractChunkIndex <: AbstractDocumentIndex

Main abstract type for storing document chunks and their embeddings. It also stores tags and sources for each chunk.

Required Fields

source


# PromptingTools.Experimental.RAGTools.AbstractGeneratorType.
julia
AbstractGenerator <: AbstractGenerationMethod

Abstract type for generating an answer with generate! (use to change the process / return type of generate).

Required Fields

source


# PromptingTools.Experimental.RAGTools.AbstractIndexBuilderType.
julia
AbstractIndexBuilder

Abstract type for building an index with build_index (use to change the process / return type of build_index).

Required Fields

source


# PromptingTools.Experimental.RAGTools.AbstractMultiIndexType.
julia
AbstractMultiIndex <: AbstractDocumentIndex

Experimental abstract type for storing multiple document indexes. Not yet implemented.

source


# PromptingTools.Experimental.RAGTools.AbstractRetrieverType.
julia
AbstractRetriever <: AbstractRetrievalMethod

Abstract type for retrieving chunks from an index with retrieve (use to change the process / return type of retrieve).

Required Fields

source


# PromptingTools.Experimental.RAGTools.AdvancedGeneratorType.
julia
AdvancedGenerator <: AbstractGenerator

Default implementation for generate!. It enumerates the context snippets, runs aigenerate, and then refines the answer (via SimpleRefiner).

It uses ContextEnumerator, SimpleAnswerer, SimpleRefiner, and NoPostprocessor as default contexter, answerer, refiner, and postprocessor.

source


# PromptingTools.Experimental.RAGTools.AdvancedRetrieverType.
julia
AdvancedRetriever <: AbstractRetriever

Dispatch for retrieve with advanced retrieval methods to improve result quality. Compared to SimpleRetriever, it adds rephrasing the query and reranking the results.

Fields
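
Example

A minimal sketch mirroring the retrieve docstring example (assumes an existing index and question):

julia
retriever = AdvancedRetriever()\nresult = retrieve(retriever, index, question; top_k = 100, top_n = 5)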

source


# PromptingTools.Experimental.RAGTools.AllTagFilterType.
julia
AllTagFilter <: AbstractTagFilter

Finds the chunks that have ALL OF the specified tag(s). A method for find_tags.

source


# PromptingTools.Experimental.RAGTools.AnnotatedNodeType.
julia
AnnotatedNode{T}  <: AbstractAnnotatedNode

A node to add annotations to the generated answer in airag

Annotations can be: sources, scores, whether it's supported by the context or not, etc.

Fields

source


# PromptingTools.Experimental.RAGTools.AnyTagFilterType.
julia
AnyTagFilter <: AbstractTagFilter

Finds the chunks that have ANY OF the specified tag(s). A method for find_tags.

source


# PromptingTools.Experimental.RAGTools.BM25SimilarityType.
julia
BM25Similarity <: AbstractSimilarityFinder

Finds the closest chunks to a query by measuring the BM25 similarity between the query tokens and the chunks' document-term (keyword) representation. A method for find_closest.

Reference: Wikipedia: BM25. Implementation follows: The Next Generation of Lucene Relevance.

source


# PromptingTools.Experimental.RAGTools.BatchEmbedderType.
julia
BatchEmbedder <: AbstractEmbedder

Default embedder for get_embeddings functions. It passes individual documents to be embedded in chunks to aiembed.

source


# PromptingTools.Experimental.RAGTools.BinaryBatchEmbedderType.
julia
BinaryBatchEmbedder <: AbstractEmbedder

Same as BatchEmbedder but reduces the embeddings matrix to a binary form (eg, BitMatrix). Defines a method for get_embeddings.

Reference: HuggingFace: Embedding Quantization.

source


# PromptingTools.Experimental.RAGTools.BinaryCosineSimilarityType.
julia
BinaryCosineSimilarity <: AbstractSimilarityFinder

Finds the closest chunks to a query embedding by measuring the Hamming distance AND cosine similarity between the query and the chunks' embeddings in binary form. A method for find_closest.

It follows the two-pass approach:

Reference: HuggingFace: Embedding Quantization.

source


# PromptingTools.Experimental.RAGTools.BitPackedBatchEmbedderType.
julia
BitPackedBatchEmbedder <: AbstractEmbedder

Same as BatchEmbedder but reduces the embeddings matrix to a binary form packed in UInt64 (eg, BitMatrix.chunks). Defines a method for get_embeddings.

See also utilities pack_bits and unpack_bits to move between packed/non-packed binary forms.

Reference: HuggingFace: Embedding Quantization.

source


# PromptingTools.Experimental.RAGTools.BitPackedCosineSimilarityType.
julia
BitPackedCosineSimilarity <: AbstractSimilarityFinder

Finds the closest chunks to a query embedding by measuring the Hamming distance AND cosine similarity between the query and the chunks' embeddings in binary form. A method for find_closest.

The difference to BinaryCosineSimilarity is that the binary values are packed into UInt64, which is more efficient.

Reference: HuggingFace: Embedding Quantization. Implementation of hamming_distance is based on TinyRAG.

source


# PromptingTools.Experimental.RAGTools.CandidateChunksType.
julia
CandidateChunks

A struct for storing references to chunks in the given index (identified by index_id) called positions and scores holding the strength of similarity (=1 is the highest, most similar). It's the result of the retrieval stage of RAG.

Fields
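
Example

A minimal sketch mirroring the build_context example (assumes an existing index):

julia
candidates = CandidateChunks(index.id, [2, 4], [0.1, 0.2])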

source


# PromptingTools.Experimental.RAGTools.ChunkEmbeddingsIndexType.
julia
ChunkEmbeddingsIndex

Main struct for storing document chunks and their embeddings. It also stores tags and sources for each chunk.

Previously, this struct was called ChunkIndex.

Fields

source


# PromptingTools.Experimental.RAGTools.ChunkKeywordsIndexType.
julia
ChunkKeywordsIndex

Struct for storing chunks of text and associated keywords for BM25 similarity search.

Fields

Example

We can easily create a keywords-based index from a standard embeddings-based index.

julia
\n# Let's assume we have a standard embeddings-based index\nindex = build_index(SimpleIndexer(), texts; chunker_kwargs = (; max_length=10))\n\n# Creating an additional index for keyword-based search (BM25), is as simple as\nindex_keywords = ChunkKeywordsIndex(index)\n\n# We can immediately create a MultiIndex (a hybrid index holding both indices)\nmulti_index = MultiIndex([index, index_keywords])

You can also build the index via build_index

julia
# given some sentences and sources\nindex_keywords = build_index(KeywordsIndexer(), sentences; chunker_kwargs=(; sources))\n\n# Retrieve closest chunks with\nretriever = SimpleBM25Retriever()\nresult = retrieve(retriever, index_keywords, "What are the best practices for parallel computing in Julia?")\nresult.context

If you want to use airag, don't forget to specify the config to make sure keywords are processed (ie, tokenized) and that BM25 is used for searching candidates

julia
cfg = RAGConfig(; retriever = SimpleBM25Retriever());\nairag(cfg, index_keywords;\n    question = "What are the best practices for parallel computing in Julia?")

source


# PromptingTools.Experimental.RAGTools.ChunkKeywordsIndexMethod.
julia
ChunkKeywordsIndex(\n    [processor::AbstractProcessor=KeywordsProcessor(),] index::ChunkEmbeddingsIndex; verbose::Int = 1,\n    index_id = gensym("ChunkKeywordsIndex"), processor_kwargs...)

Convenience method to quickly create a ChunkKeywordsIndex from an existing ChunkEmbeddingsIndex.

Example

julia
\n# Let's assume we have a standard embeddings-based index\nindex = build_index(SimpleIndexer(), texts; chunker_kwargs = (; max_length=10))\n\n# Creating an additional index for keyword-based search (BM25), is as simple as\nindex_keywords = ChunkKeywordsIndex(index)\n\n# We can immediately create a MultiIndex (a hybrid index holding both indices)\nmulti_index = MultiIndex([index, index_keywords])

source


# PromptingTools.Experimental.RAGTools.CohereRerankerType.
julia
CohereReranker <: AbstractReranker

Rerank strategy using the Cohere Rerank API. Requires an API key. A method for rerank.

source


# PromptingTools.Experimental.RAGTools.ContextEnumeratorType.
julia
ContextEnumerator <: AbstractContextBuilder

Default method for build_context!. It simply enumerates the context snippets around each position in candidates. When possible, it will add surrounding chunks (from the same source).

source


# PromptingTools.Experimental.RAGTools.CosineSimilarityType.
julia
CosineSimilarity <: AbstractSimilarityFinder

Finds the closest chunks to a query embedding by measuring the cosine similarity between the query and the chunks' embeddings. A method for find_closest (see the docstring for more details and usage example).

source


# PromptingTools.Experimental.RAGTools.DocumentTermMatrixType.
julia
DocumentTermMatrix{T<:AbstractString}

A sparse matrix of term frequencies and document lengths to allow calculation of BM25 similarity scores.

source


# PromptingTools.Experimental.RAGTools.FileChunkerType.
julia
FileChunker <: AbstractChunker

Chunker when you provide file paths to get_chunks functions.

Ie, the inputs will be validated first (eg, file exists, etc) and then read into memory.

Set as default chunker in get_chunks functions.

source


# PromptingTools.Experimental.RAGTools.FlashRankerType.
julia
FlashRanker <: AbstractReranker

Rerank strategy using the package FlashRank.jl and local models. A method for rerank.

You must first import the FlashRank.jl package. To automatically download any required models, set your ENV["DATADEPS_ALWAYS_ACCEPT"] = true (see DataDeps for more details).

Example

julia
using FlashRank\n\n# Wrap the model to be a valid Ranker recognized by RAGTools\n# It will be provided to the airag/rerank function to avoid instantiating it on every call\nreranker = FlashRank.RankerModel(:mini) |> FlashRanker\n# You can choose :tiny or :mini\n\n## Apply to the pipeline configuration, eg, \ncfg = RAGConfig(; retriever = AdvancedRetriever(; reranker))\n\n# Ask a question (assumes you have some `index`)\nquestion = "What are the best practices for parallel computing in Julia?"\nresult = airag(cfg, index; question, return_all = true)

source


# PromptingTools.Experimental.RAGTools.HTMLStylerType.
julia
HTMLStyler

Defines styling via classes (attribute class) and styles (attribute style) for HTML formatting of AbstractAnnotatedNode
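
Example

A minimal sketch mirroring the print_html styling example:

julia
low_styler = HTMLStyler(styles = "color:magenta", classes = "")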

source


# PromptingTools.Experimental.RAGTools.HyDERephraserType.
julia
HyDERephraser <: AbstractRephraser

Rephraser implemented using the provided AI Template (eg, ...) and standard chat model. A method for rephrase.

It uses a prompt-based rephrasing method called HyDE (Hypothetical Document Embedding), where instead of looking for an embedding of the question, we look for the documents most similar to a synthetic passage that would be a good answer to our question.

Reference: Arxiv paper.

source


# PromptingTools.Experimental.RAGTools.JudgeAllScoresType.

final_rating is the average of all scoring criteria. Explain the final_rating in rationale

source


# PromptingTools.Experimental.RAGTools.JudgeRatingType.

Provide the final_rating between 1-5. Provide the rationale for it.

source


# PromptingTools.Experimental.RAGTools.KeywordsIndexerType.
julia
KeywordsIndexer <: AbstractIndexBuilder

Keyword-based index (BM25) to be returned by build_index.

It uses TextChunker, KeywordsProcessor, and NoTagger as default chunker, processor, and tagger.

source


# PromptingTools.Experimental.RAGTools.KeywordsProcessorType.
julia
KeywordsProcessor <: AbstractProcessor

Default keywords processor for get_keywords functions. It normalizes the documents, tokenizes them and builds a DocumentTermMatrix.

source


# PromptingTools.Experimental.RAGTools.MultiCandidateChunksType.
julia
MultiCandidateChunks

A struct for storing references to multiple sets of chunks across different indices. Each set of chunks is identified by an index_id in index_ids, with corresponding positions in the index and scores indicating the strength of similarity.

This struct is useful for scenarios where candidates are drawn from multiple indices, and there is a need to keep track of which candidates came from which index.

Fields

source


# PromptingTools.Experimental.RAGTools.MultiFinderType.
julia
MultiFinder <: AbstractSimilarityFinder

Composite finder for MultiIndex where we want to set multiple finders for each index. A method for find_closest. Positions correspond to indexes(::MultiIndex).

source


# PromptingTools.Experimental.RAGTools.MultiIndexType.
julia
MultiIndex

Composite index that stores multiple ChunkIndex objects and their embeddings.

Fields

Use the accessor indexes to access the individual indexes.

Examples

We can create a MultiIndex from a vector of AbstractChunkIndex objects.

julia
index = build_index(SimpleIndexer(), texts; chunker_kwargs = (; sources))\nindex_keywords = ChunkKeywordsIndex(index) # same chunks as above but adds BM25 instead of embeddings\n\nmulti_index = MultiIndex([index, index_keywords])

To use airag with different types of indices, we need to specify how to find the closest items for each index

julia
# Cosine similarity for embeddings and BM25 for keywords, same order as indexes in MultiIndex\nfinder = RT.MultiFinder([RT.CosineSimilarity(), RT.BM25Similarity()])\n\n# Notice that we add `processor` to make sure keywords are processed (ie, tokenized) as well\ncfg = RAGConfig(; retriever = SimpleRetriever(; processor = RT.KeywordsProcessor(), finder))\n\n# Ask questions\nmsg = airag(cfg, multi_index; question = "What are the best practices for parallel computing in Julia?")\npprint(msg) # prettify the answer

source


# PromptingTools.Experimental.RAGTools.NoEmbedderType.
julia
NoEmbedder <: AbstractEmbedder

No-op embedder for get_embeddings functions. It returns nothing.

source


# PromptingTools.Experimental.RAGTools.NoPostprocessorType.
julia
NoPostprocessor <: AbstractPostprocessor

Default method for postprocess! method. A passthrough option that returns the result without any changes.

Overload this method to add custom postprocessing steps, eg, logging, saving conversations to disk, etc.

source


# PromptingTools.Experimental.RAGTools.NoProcessorType.
julia
NoProcessor <: AbstractProcessor

No-op processor for get_keywords functions. It returns the inputs as is.

source


# PromptingTools.Experimental.RAGTools.NoRefinerType.
julia
NoRefiner <: AbstractRefiner

Default method for refine! method. A passthrough option that returns the result.answer without any changes.

source


# PromptingTools.Experimental.RAGTools.NoRephraserType.
julia
NoRephraser <: AbstractRephraser

No-op implementation for rephrase, which simply passes the question through.

source


# PromptingTools.Experimental.RAGTools.NoRerankerType.
julia
NoReranker <: AbstractReranker

No-op implementation for rerank, which simply passes the candidate chunks through.

source


# PromptingTools.Experimental.RAGTools.NoTagFilterType.
julia
NoTagFilter <: AbstractTagFilter

No-op implementation for find_tags, which simply returns all chunks.

source


# PromptingTools.Experimental.RAGTools.NoTaggerType.
julia
NoTagger <: AbstractTagger

No-op tagger for get_tags functions. It returns (nothing, nothing).

source


# PromptingTools.Experimental.RAGTools.OpenTaggerType.
julia
OpenTagger <: AbstractTagger

Tagger for get_tags functions, which generates possible tags for each chunk via aiextract. You can customize it via prompt template (default: :RAGExtractMetadataShort), but it's quite open-ended (ie, AI decides the possible tags).

source


# PromptingTools.Experimental.RAGTools.PassthroughTaggerType.
julia
PassthroughTagger <: AbstractTagger

Tagger for get_tags functions, which passes tags directly as Vector of Vectors of strings (ie, tags[i] is the tags for docs[i]).

source


# PromptingTools.Experimental.RAGTools.RAGConfigType.
julia
RAGConfig <: AbstractRAGConfig

Default configuration for RAG. It uses SimpleIndexer, SimpleRetriever, and SimpleGenerator as default components. Provided as the first argument in airag.

To customize the components, replace corresponding fields for each step of the RAG pipeline (eg, use subtypes(AbstractIndexBuilder) to find the available options).
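Example

A minimal sketch based on the airag examples elsewhere on this page; index and question are assumed to exist.

julia
# default pipeline
cfg = RAGConfig()
msg = airag(cfg, index; question)

# swap a component, eg, use the advanced retriever (rephrasing + reranking)
cfg = RAGConfig(; retriever = AdvancedRetriever())
msg = airag(cfg, index; question)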

source


# PromptingTools.Experimental.RAGTools.RAGResultType.
julia
RAGResult

A struct for debugging RAG answers. It contains the question, answer, context, and the candidate chunks at each step of the RAG pipeline.

Think of the flow as question -> rephrased_questions -> answer -> final_answer with the context and candidate chunks helping along the way.

Fields

See also: pprint (pretty printing), annotate_support (for annotating the answer)
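Example

A hedged sketch: assumes an existing index and that airag is run with return_all = true to obtain the RAGResult.

julia
# `cfg`, `index`, and `question` are assumed to exist
result = airag(cfg, index; question, return_all = true)

result.question      # the original question
result.final_answer  # the answer after any refinement step
PT.pprint(result)    # pretty-print with support annotations against the context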

source


# PromptingTools.Experimental.RAGTools.RankGPTRerankerType.
julia
RankGPTReranker <: AbstractReranker

Rerank strategy using the RankGPT algorithm (calling LLMs). A method for rerank.

Reference

[1] Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agents by W. Sun et al. [2] RankGPT Github

source


# PromptingTools.Experimental.RAGTools.RankGPTResultType.
julia
RankGPTResult

Results from the RankGPT algorithm.

Fields

source


# PromptingTools.Experimental.RAGTools.SimpleAnswererType.
julia
SimpleAnswerer <: AbstractAnswerer

Default method for answer! method. Generates an answer using the aigenerate function with the provided context and question.

source


# PromptingTools.Experimental.RAGTools.SimpleBM25RetrieverType.
julia
SimpleBM25Retriever <: AbstractRetriever

Keyword-based implementation for retrieve. It does a simple similarity search via BM25Similarity and returns the results.

Make sure to use consistent processor and tagger with the Preparation Stage (build_index)!

Fields
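Example

A hedged sketch (texts and sources are placeholders): build a keywords-based index first, then retrieve with BM25 similarity.

julia
# BM25 operates on a DocumentTermMatrix, so build a keywords index first
index = build_index(KeywordsIndexer(), texts; chunker_kwargs = (; sources))

# retrieve with keyword (BM25) similarity
result = retrieve(SimpleBM25Retriever(), index,
    "What are the best practices for parallel computing in Julia?")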

source


# PromptingTools.Experimental.RAGTools.SimpleGeneratorType.
julia
SimpleGenerator <: AbstractGenerator

Default implementation for generate. It simply enumerates context snippets and runs aigenerate (no refinement).

It uses ContextEnumerator, SimpleAnswerer, NoRefiner, and NoPostprocessor as default contexter, answerer, refiner, and postprocessor.

source


# PromptingTools.Experimental.RAGTools.SimpleIndexerType.
julia
SimpleIndexer <: AbstractIndexBuilder

Default implementation for build_index.

It uses TextChunker, BatchEmbedder, and NoTagger as default chunker, embedder, and tagger.

source


# PromptingTools.Experimental.RAGTools.SimpleRefinerType.
julia
SimpleRefiner <: AbstractRefiner

Refines the answer using the same context previously provided via the provided prompt template. A method for refine!.

source


# PromptingTools.Experimental.RAGTools.SimpleRephraserType.
julia
SimpleRephraser <: AbstractRephraser

Rephraser implemented using the provided AI Template (eg, ...) and standard chat model. A method for rephrase.

source


# PromptingTools.Experimental.RAGTools.SimpleRetrieverType.
julia
SimpleRetriever <: AbstractRetriever

Default implementation for retrieve function. It does a simple similarity search via CosineSimilarity and returns the results.

Make sure to use consistent embedder and tagger with the Preparation Stage (build_index)!

Fields
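Example

Mirrors the retrieve examples further below; assumes an existing embeddings-based index.

julia
# assumes an existing embeddings-based `index`
retriever = SimpleRetriever()
result = retrieve(retriever, index, "What is the capital of France?"; top_n = 5)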

source


# PromptingTools.Experimental.RAGTools.StylerType.
julia
Styler

Defines styling keywords for printstyled for each AbstractAnnotatedNode

source


# PromptingTools.Experimental.RAGTools.SubChunkIndexType.
julia
SubChunkIndex

A view of the parent index with respect to the chunks (and chunk-aligned fields). All methods and accessors working for AbstractChunkIndex also work for SubChunkIndex. It does not yet work for MultiIndex.

Fields

Example

julia
cc = CandidateChunks(index.id, 1:10)\nsub_index = @view(index[cc])

You can use SubChunkIndex to access chunks or sources (and other fields) from a parent index, eg,

julia
RT.chunks(sub_index)\nRT.sources(sub_index)\nRT.chunkdata(sub_index) # slice of embeddings\nRT.embeddings(sub_index) # slice of embeddings\nRT.tags(sub_index) # slice of tags\nRT.tags_vocab(sub_index) # unchanged, identical to parent version\nRT.extras(sub_index) # slice of extras

Access the parent index that the positions correspond to

julia
parent(sub_index)\nRT.positions(sub_index)

source


# PromptingTools.Experimental.RAGTools.SubDocumentTermMatrixType.

A partial view of a DocumentTermMatrix; tf is MATERIALIZED for performance and fewer allocations.

source


# PromptingTools.Experimental.RAGTools.TavilySearchRefinerType.
julia
TavilySearchRefiner <: AbstractRefiner

Refines the answer by executing a web search using the Tavily API. This method aims to enhance the answer's accuracy and relevance by incorporating information retrieved from the web. A method for refine!.

source


# PromptingTools.Experimental.RAGTools.TextChunkerType.
julia
TextChunker <: AbstractChunker

Chunker to use when you provide text directly to get_chunks functions; the inputs are chunked as-is.

source


# PromptingTools.Experimental.RAGTools.TrigramAnnotaterType.
julia
TrigramAnnotater

Annotation method where we score answer versus each context based on word-level trigrams that match.

It's a very simple method (it can lose some semantic meaning in longer sequences, eg, negations), but it works reasonably well for both text and code.

source


# PromptingTools.Experimental.RAGTools._normalizeFunction.

Shortcut to LinearAlgebra.normalize. Provided in the package extension RAGToolsExperimentalExt (Requires SparseArrays, Unicode, and LinearAlgebra)

source


# PromptingTools.Experimental.RAGTools.add_node_metadata!Method.
julia
add_node_metadata!(annotater::TrigramAnnotater,\n    root::AnnotatedNode; add_sources::Bool = true, add_scores::Bool = true,\n    sources::Union{Nothing, AbstractVector{<:AbstractString}} = nothing)

Adds metadata to the children of root. Metadata includes sources and scores, if requested.

Optionally, it can add a list of sources at the end of the printed text.

The metadata is added by inserting new nodes into the root's children list (they have no children of their own, so they will be printed out).

source


# PromptingTools.Experimental.RAGTools.airagMethod.
julia
airag(cfg::AbstractRAGConfig, index::AbstractDocumentIndex;\n    question::AbstractString,\n    verbose::Integer = 1, return_all::Bool = false,\n    api_kwargs::NamedTuple = NamedTuple(),\n    retriever::AbstractRetriever = cfg.retriever,\n    retriever_kwargs::NamedTuple = NamedTuple(),\n    generator::AbstractGenerator = cfg.generator,\n    generator_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

High-level wrapper for Retrieval-Augmented Generation (RAG). It combines the retrieve and generate! steps, which you can customize if needed.

The simplest version first finds the relevant chunks in index for the question and then sends these chunks to the AI model to help with generating a response to the question.

To customize the components, replace the types (retriever, generator) of the corresponding step of the RAG pipeline - or go into sub-routines within the steps. Eg, use subtypes(AbstractRetriever) to find the available options.

Arguments

Returns

See also build_index, retrieve, generate!, RAGResult, getpropertynested, setpropertynested, merge_kwargs_nested, ChunkKeywordsIndex.

Examples

Using airag to get a response for a question:

julia
index = build_index(...)  # create an index\nquestion = "How to make a barplot in Makie.jl?"\nmsg = airag(index; question)

To understand the details of the RAG process, use return_all=true

julia
msg, details = airag(index; question, return_all = true)\n# details is a RAGDetails object with all the internal steps of the `airag` function

You can also pretty-print details to highlight generated text vs text that is supported by context. It also includes annotations of which context was used for each part of the response (where available).

julia
PT.pprint(details)

Example with advanced retrieval (question rephrasing and reranking; requires COHERE_API_KEY). We will obtain the top 100 chunks from embeddings (top_k) and the top 5 chunks from reranking (top_n). In addition, it will be done with a "custom" locally-hosted model.

julia
cfg = RAGConfig(; retriever = AdvancedRetriever())\n\n# kwargs will be big and nested, let's prepare them upfront\n# we specify "custom" model for each component that calls LLM\nkwargs = (\n    retriever_kwargs = (;\n        top_k = 100,\n        top_n = 5,\n        rephraser_kwargs = (;\n            model = "custom"),\n        embedder_kwargs = (;\n            model = "custom"),\n        tagger_kwargs = (;\n            model = "custom")),\n    generator_kwargs = (;\n        answerer_kwargs = (;\n            model = "custom"),\n        refiner_kwargs = (;\n            model = "custom")),\n    api_kwargs = (;\n        url = "http://localhost:8080"))\n\nresult = airag(cfg, index, question; kwargs...)

If you want to use hybrid retrieval (embeddings + BM25), you can easily create an additional index based on keywords and pass them both into a MultiIndex.

You need to provide an explicit config, so the pipeline knows how to handle each index in the search similarity phase (finder).

julia
index = # your existing index\n\n# create the multi-index with the keywords index\nindex_keywords = ChunkKeywordsIndex(index)\nmulti_index = MultiIndex([index, index_keywords])\n\n# define the similarity measures for the indices that you have (same order)\nfinder = RT.MultiFinder([RT.CosineSimilarity(), RT.BM25Similarity()])\ncfg = RAGConfig(; retriever=AdvancedRetriever(; processor=RT.KeywordsProcessor(), finder))\n\n# Run the pipeline with the new hybrid retrieval (return the `RAGResult` to see the details)\nresult = airag(cfg, multi_index; question, return_all=true)\n\n# Pretty-print the result\nPT.pprint(result)

For easier manipulation of nested kwargs, see utilities getpropertynested, setpropertynested, merge_kwargs_nested.

source


# PromptingTools.Experimental.RAGTools.align_node_styles!Method.
julia
align_node_styles!(annotater::TrigramAnnotater, nodes::AbstractVector{<:AnnotatedNode}; kwargs...)

Aligns the styles of the nodes based on the surrounding nodes ("fill-in-the-middle").

If the node has no score, but the surrounding nodes have the same style, the node will inherit the style of the surrounding nodes.

source


# PromptingTools.Experimental.RAGTools.annotate_supportMethod.
julia
annotate_support(annotater::TrigramAnnotater, answer::AbstractString,\n    context::AbstractVector; min_score::Float64 = 0.5,\n    skip_trigrams::Bool = true, hashed::Bool = true,\n    sources::Union{Nothing, AbstractVector{<:AbstractString}} = nothing,\n    min_source_score::Float64 = 0.25,\n    add_sources::Bool = true,\n    add_scores::Bool = true, kwargs...)

Annotates the answer with the overlap (what's supported by the context) and returns the annotated tree of nodes representing the answer.

Returns a "root" node with children nodes representing the sentences/code blocks in the answer. Only the "leaf" nodes are to be printed (to avoid duplication), "leaf" nodes are those with NO children.

Default logic:

Arguments

Example

julia
annotater = TrigramAnnotater()\ncontext = [\n    "This is a test context.", "Another context sentence.", "Final piece of context."]\nanswer = "This is a test context. Another context sentence."\n\nannotated_root = annotate_support(annotater, answer, context)\npprint(annotated_root) # pretty print the annotated tree

source


# PromptingTools.Experimental.RAGTools.annotate_supportMethod.
julia
annotate_support(\n    annotater::TrigramAnnotater, result::AbstractRAGResult; min_score::Float64 = 0.5,\n    skip_trigrams::Bool = true, hashed::Bool = true,\n    min_source_score::Float64 = 0.25,\n    add_sources::Bool = true,\n    add_scores::Bool = true, kwargs...)

Dispatch for annotate_support for AbstractRAGResult type. It extracts the final_answer and context from the result and calls annotate_support with them.

See annotate_support for more details.

Example

julia
res = RAGResult(; question = "", final_answer = "This is a test.",\n    context = ["Test context.", "Completely different"])\nannotated_root = annotate_support(annotater, res)\nPT.pprint(annotated_root)

source


# PromptingTools.Experimental.RAGTools.answer!Method.
julia
answer!(\n    answerer::SimpleAnswerer, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    model::AbstractString = PT.MODEL_CHAT, verbose::Bool = true,\n    template::Symbol = :RAGAnswerFromContext,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Generates an answer using the aigenerate function with the provided result.context and result.question.

Returns

Arguments

source


# PromptingTools.Experimental.RAGTools.build_contextMethod.
julia
build_context(contexter::ContextEnumerator,\n    index::AbstractDocumentIndex, candidates::AbstractCandidateChunks;\n    verbose::Bool = true,\n    chunks_window_margin::Tuple{Int, Int} = (1, 1), kwargs...)\n\n    build_context!(contexter::ContextEnumerator,\n    index::AbstractDocumentIndex, result::AbstractRAGResult; kwargs...)

Build context strings for each position in candidates considering a window margin around each position. If mutating version is used (build_context!), it will use result.reranked_candidates to update the result.context field.

Arguments

Returns

Examples

julia
index = ChunkIndex(...)  # Assuming a proper index is defined\ncandidates = CandidateChunks(index.id, [2, 4], [0.1, 0.2])\ncontext = build_context(ContextEnumerator(), index, candidates; chunks_window_margin=(0, 1)) # include only one following chunk for each matching chunk

source


# PromptingTools.Experimental.RAGTools.build_indexMethod.
julia
build_index(\n    indexer::KeywordsIndexer, files_or_docs::Vector{<:AbstractString};\n    verbose::Integer = 1,\n    extras::Union{Nothing, AbstractVector} = nothing,\n    index_id = gensym("ChunkKeywordsIndex"),\n    chunker::AbstractChunker = indexer.chunker,\n    chunker_kwargs::NamedTuple = NamedTuple(),\n    processor::AbstractProcessor = indexer.processor,\n    processor_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = indexer.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    api_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Builds a ChunkKeywordsIndex from the provided files or documents to support keyword-based search (BM25).
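Example

A hedged sketch; texts and sources are placeholders.

julia
# build a keywords (BM25) index directly from texts
index_keywords = build_index(KeywordsIndexer(), texts; chunker_kwargs = (; sources))

# alternatively, reuse the chunks of an existing embeddings-based index
index_keywords = ChunkKeywordsIndex(index)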

source


# PromptingTools.Experimental.RAGTools.build_indexMethod.
julia
build_index(\n    indexer::AbstractIndexBuilder, files_or_docs::Vector{<:AbstractString};\n    verbose::Integer = 1,\n    extras::Union{Nothing, AbstractVector} = nothing,\n    index_id = gensym("ChunkEmbeddingsIndex"),\n    chunker::AbstractChunker = indexer.chunker,\n    chunker_kwargs::NamedTuple = NamedTuple(),\n    embedder::AbstractEmbedder = indexer.embedder,\n    embedder_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = indexer.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    api_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Build an INDEX for RAG (Retrieval-Augmented Generation) applications from the provided file paths. INDEX is an object storing the document chunks and their embeddings (and potentially other information).

The function processes each file or document (depending on chunker), splits its content into chunks, embeds these chunks, optionally extracts metadata, and then combines this information into a retrievable index.

Define your own methods via indexer and its subcomponents (chunker, embedder, tagger).

Arguments

Returns

See also: ChunkEmbeddingsIndex, get_chunks, get_embeddings, get_tags, CandidateChunks, find_closest, find_tags, rerank, retrieve, generate!, airag

Examples

julia
# Default is loading a vector of strings and chunking them (`TextChunker()`)\nindex = build_index(SimpleIndexer(), texts; chunker_kwargs = (; max_length=10))\n\n# Another example with tags extraction, splitting only sentences and verbose output\n# Assuming `test_files` is a vector of file paths\nindexer = SimpleIndexer(chunker=FileChunker(), tagger=OpenTagger())\nindex = build_index(indexer, test_files; \n        chunker_kwargs(; separators=[". "]), verbose=true)

Notes

source


# PromptingTools.Experimental.RAGTools.build_qa_evalsMethod.
julia
build_qa_evals(doc_chunks::Vector{<:AbstractString}, sources::Vector{<:AbstractString};\n               model=PT.MODEL_CHAT, instructions="None.", qa_template::Symbol=:RAGCreateQAFromContext, \n               verbose::Bool=true, api_kwargs::NamedTuple = NamedTuple(), kwargs...) -> Vector{QAEvalItem}

Create a collection of question and answer evaluations (QAEvalItem) from document chunks and sources. This function generates Q&A pairs based on the provided document chunks, using a specified AI model and template.

Arguments

Returns

Vector{QAEvalItem}: A vector of QAEvalItem structs, each containing a source, context, question, and answer. Invalid or empty items are filtered out.

Notes

Examples

Creating Q&A evaluations from a set of document chunks:

julia
doc_chunks = ["Text from document 1", "Text from document 2"]\nsources = ["source1", "source2"]\nqa_evals = build_qa_evals(doc_chunks, sources)

source


# PromptingTools.Experimental.RAGTools.build_tagsFunction.

Builds a matrix of tags and a vocabulary list. REQUIRES SparseArrays, LinearAlgebra, Unicode packages to be loaded!!

source


# PromptingTools.Experimental.RAGTools.build_tagsMethod.
julia
build_tags(tagger::AbstractTagger, chunk_tags::Nothing; kwargs...)

No-op that skips any tag building, returning nothing, nothing

Otherwise, it would build the sparse matrix and the vocabulary (requires SparseArrays and LinearAlgebra packages to be loaded).

source


# PromptingTools.Experimental.RAGTools.chunkdataMethod.

Access chunkdata for a subset of chunks; chunk_idx is a vector of chunk indices in the index.

source


# PromptingTools.Experimental.RAGTools.chunkdataMethod.

Access chunkdata for a subset of chunks; chunk_idx is a vector of chunk indices in the index.

source


# PromptingTools.Experimental.RAGTools.chunkdataMethod.

Access chunkdata for a subset of chunks; chunk_idx is a vector of chunk indices in the index.

source


# PromptingTools.Experimental.RAGTools.cohere_apiMethod.
julia
cohere_api(;\napi_key::AbstractString,\nendpoint::String,\nurl::AbstractString="https://api.cohere.ai/v1",\nhttp_kwargs::NamedTuple=NamedTuple(),\nkwargs...)

Lightweight wrapper around the Cohere API. See https://cohere.com/docs for more details.

Arguments

source


# PromptingTools.Experimental.RAGTools.create_permutation_instructionMethod.
julia
create_permutation_instruction(\n    context::AbstractVector{<:AbstractString}; rank_start::Integer = 1,\n    rank_end::Integer = 100, max_length::Integer = 512, template::Symbol = :RAGRankGPT)

Creates a rendered template with the injected context passages.

source


# PromptingTools.Experimental.RAGTools.extract_rankingMethod.
julia
extract_ranking(str::AbstractString)

Extracts the ranking from the response into a sorted array of integers.

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::BinaryCosineSimilarity, emb::AbstractMatrix{<:Bool},\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, rescore_multiplier::Int = 4, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by embeddings in emb) that are closest to query embedding (query_emb) using binary embeddings (in the index).

This is a two-pass approach:

Returns only top_k closest indices.

Reference: HuggingFace: Embedding Quantization.

Examples

Convert any Float embeddings to binary like this:

julia
binary_emb = map(>(0), emb)

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::AbstractSimilarityFinder, index::AbstractChunkIndex,\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, kwargs...)

Finds the indices of chunks (represented by embeddings in index) that are closest to query embedding (query_emb).

Returns only top_k closest indices.

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::CosineSimilarity, emb::AbstractMatrix{<:Real},\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by embeddings in emb) that are closest (in cosine similarity for CosineSimilarity()) to query embedding (query_emb).

finder is the logic used for the similarity search. Default is CosineSimilarity.

If minimum_similarity is provided, only indices with similarity greater than or equal to it are returned. Similarity can be between -1 and 1 (-1 = completely opposite, 1 = exactly the same).

Returns only top_k closest indices.
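Example

A hedged sketch with dummy data; the return shape described in the comment is an assumption, not taken from the docstring.

julia
emb = rand(Float32, 128, 1000)   # 1000 chunk embeddings, one per column (dummy data)
query_emb = rand(Float32, 128)

# top 5 most similar chunks with at least 0.25 cosine similarity;
# the result is assumed to hold the chunk positions and their similarity scores
res = find_closest(CosineSimilarity(), emb, query_emb; top_k = 5, minimum_similarity = 0.25)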

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::BM25Similarity, dtm::AbstractDocumentTermMatrix,\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by DocumentTermMatrix in dtm) that are closest to query tokens (query_tokens) using BM25.

Reference: Wikipedia: BM25. Implementation follows: The Next Generation of Lucene Relevance.

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::BitPackedCosineSimilarity, emb::AbstractMatrix{<:Bool},\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, rescore_multiplier::Int = 4, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by embeddings in emb) that are closest to query embedding (query_emb) using bit-packed binary embeddings (in the index).

This is a two-pass approach:

Returns only top_k closest indices.

Reference: HuggingFace: Embedding Quantization.

Examples

Convert any Float embeddings to bit-packed binary like this:

julia
bitpacked_emb = pack_bits(emb.>0)

source


# PromptingTools.Experimental.RAGTools.find_tagsMethod.
julia
find_tags(method::AnyTagFilter, index::AbstractChunkIndex,\n    tag::Union{AbstractString, Regex}; kwargs...)\n\nfind_tags(method::AnyTagFilter, index::AbstractChunkIndex,\n    tags::Vector{T}; kwargs...) where {T <: Union{AbstractString, Regex}}

Finds the indices of chunks (represented by tags in index) that have ANY OF the specified tag or tags.
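Example

A hedged sketch; assumes the index was built with a tagger so it has a tags matrix.

julia
# candidate chunks tagged with either "julia" or "python"
cc = find_tags(AnyTagFilter(), index, ["julia", "python"])

# regexes are supported as well
cc = find_tags(AnyTagFilter(), index, r"^julia")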

source


# PromptingTools.Experimental.RAGTools.find_tagsMethod.
julia
find_tags(method::AllTagFilter, index::AbstractChunkIndex,\n    tag::Union{AbstractString, Regex}; kwargs...)\n\nfind_tags(method::AllTagFilter, index::AbstractChunkIndex,\n    tags::Vector{T}; kwargs...) where {T <: Union{AbstractString, Regex}}

Finds the indices of chunks (represented by tags in index) that have ALL OF the specified tag or tags.

source


# PromptingTools.Experimental.RAGTools.find_tagsMethod.
julia
find_tags(method::NoTagFilter, index::AbstractChunkIndex,\n    tags::Union{T, AbstractVector{<:T}}; kwargs...) where {T <:\n                                                           Union{\n    AbstractString, Regex, Nothing}}\n    tags; kwargs...)

Returns all chunks in the index, ie, no filtering, so we simply return nothing (easier for dispatch).

source


# PromptingTools.Experimental.RAGTools.generate!Method.
julia
generate!(\n    generator::AbstractGenerator, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    verbose::Integer = 1,\n    api_kwargs::NamedTuple = NamedTuple(),\n    contexter::AbstractContextBuilder = generator.contexter,\n    contexter_kwargs::NamedTuple = NamedTuple(),\n    answerer::AbstractAnswerer = generator.answerer,\n    answerer_kwargs::NamedTuple = NamedTuple(),\n    refiner::AbstractRefiner = generator.refiner,\n    refiner_kwargs::NamedTuple = NamedTuple(),\n    postprocessor::AbstractPostprocessor = generator.postprocessor,\n    postprocessor_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Generate the response using the provided generator, index, and result. It is the second step in the RAG pipeline (after retrieve).

Returns the mutated result with the result.final_answer and the full conversation saved in result.conversations[:final_answer].

Notes

Arguments

See also: retrieve, build_context!, ContextEnumerator, answer!, SimpleAnswerer, refine!, NoRefiner, SimpleRefiner, postprocess!, NoPostprocessor

Examples

julia
Assume we already have `index`\n\nquestion = "What are the best practices for parallel computing in Julia?"\n\n# Retrieve the relevant chunks - returns RAGResult\nresult = retrieve(index, question)\n\n# Generate the answer using the default generator, mutates the same result\nresult = generate!(index, result)

source


# PromptingTools.Experimental.RAGTools.get_chunksMethod.
julia
get_chunks(chunker::AbstractChunker,\n    files_or_docs::Vector{<:AbstractString};\n    sources::AbstractVector{<:AbstractString} = files_or_docs,\n    verbose::Bool = true,\n    separators = ["\\n\\n", ". ", "\\n", " "], max_length::Int = 256)

Chunks the provided files_or_docs into chunks of maximum length max_length (if possible with provided separators).

Supports two modes of operation:

Arguments
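Example

A hedged sketch; that the function returns the chunk strings together with their aligned sources is an assumption based on how build_index uses it.

julia
texts = ["This is a long document about Julia...", "Another document about RAG..."]
# assumed to return the chunk strings and their aligned sources
chunks, sources = get_chunks(TextChunker(), texts;
    sources = ["doc1", "doc2"], max_length = 128)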

source


# PromptingTools.Experimental.RAGTools.get_embeddingsMethod.
julia
get_embeddings(embedder::BatchEmbedder, docs::AbstractVector{<:AbstractString};\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_EMBEDDING,\n    truncate_dimension::Union{Int, Nothing} = nothing,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    target_batch_size_length::Int = 80_000,\n    ntasks::Int = 4 * Threads.nthreads(),\n    kwargs...)

Embeds a vector of docs using the provided model (kwarg model) in a batched manner - BatchEmbedder.

BatchEmbedder tries to batch embedding calls for roughly 80K characters per call (to avoid exceeding the API rate limit) to reduce network latency.

Notes

Arguments
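Example

A hedged sketch; the model name is a placeholder and the matrix orientation in the comment is an assumption.

julia
# `chunks` is a vector of strings; the model name is a placeholder
emb = get_embeddings(BatchEmbedder(), chunks;
    model = "text-embedding-3-small", target_batch_size_length = 80_000)
# `emb` is an embeddings matrix (assumed one column per document)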

source


# PromptingTools.Experimental.RAGTools.get_embeddingsMethod.
julia
get_embeddings(embedder::BinaryBatchEmbedder, docs::AbstractVector{<:AbstractString};\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_EMBEDDING,\n    truncate_dimension::Union{Int, Nothing} = nothing,\n    return_type::Type = Matrix{Bool},\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    target_batch_size_length::Int = 80_000,\n    ntasks::Int = 4 * Threads.nthreads(),\n    kwargs...)

Embeds a vector of docs using the provided model (kwarg model) in a batched manner and then returns the binary embeddings matrix - BinaryBatchEmbedder.

BinaryBatchEmbedder tries to batch embedding calls for roughly 80K characters per call (to avoid exceeding the API rate limit) to reduce network latency.

Notes

Arguments

source


# PromptingTools.Experimental.RAGTools.get_embeddingsMethod.
julia
get_embeddings(embedder::BitPackedBatchEmbedder, docs::AbstractVector{<:AbstractString};\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_EMBEDDING,\n    truncate_dimension::Union{Int, Nothing} = nothing,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    target_batch_size_length::Int = 80_000,\n    ntasks::Int = 4 * Threads.nthreads(),\n    kwargs...)

Embeds a vector of docs using the provided model (kwarg model) in a batched manner and then returns the binary embeddings matrix represented in UInt64 (bit-packed) - BitPackedBatchEmbedder.

BitPackedBatchEmbedder tries to batch embedding calls for roughly 80K characters per call (to avoid exceeding the API rate limit) to reduce network latency.

The best option for FAST and MEMORY-EFFICIENT storage of embeddings; for retrieval, use BitPackedCosineSimilarity.

Notes

Arguments

See also: unpack_bits, pack_bits, BitPackedCosineSimilarity.

source


# PromptingTools.Experimental.RAGTools.get_tagsMethod.
julia
get_tags(tagger::NoTagger, docs::AbstractVector{<:AbstractString};\n    kwargs...)

Simple no-op that skips any tagging of the documents

source


# PromptingTools.Experimental.RAGTools.get_tagsMethod.
julia
get_tags(tagger::OpenTagger, docs::AbstractVector{<:AbstractString};\n    verbose::Bool = true,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Extracts "tags" (metadata/keywords) from a vector of docs using the provided model (kwarg model).

Arguments

source


# PromptingTools.Experimental.RAGTools.get_tagsMethod.
julia
get_tags(tagger::PassthroughTagger, docs::AbstractVector{<:AbstractString};\n    tags::AbstractVector{<:AbstractVector{<:AbstractString}},\n    kwargs...)

Pass tags directly as Vector of Vectors of strings (ie, tags[i] is the tags for docs[i]). It then builds the vocabulary from the tags and returns both the tags in matrix form and the vocabulary.
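Example

A hedged sketch; the tags vector must be aligned with docs.

julia
docs = ["I like DataFrames.jl", "I like Plots.jl"]
tags = [["dataframes", "julia"], ["plots", "julia"]]  # must be aligned with `docs`
get_tags(PassthroughTagger(), docs; tags)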

source


# PromptingTools.Experimental.RAGTools.getpropertynestedFunction.
julia
getpropertynested(\n    nt::NamedTuple, parent_keys::Vector{Symbol}, key::Symbol, default = nothing)

Get a property key from a nested NamedTuple nt, where the property is nested to a key in parent_keys.

Useful for nested kwargs where we want to get some property in parent_keys subset (eg, model in retriever_kwargs).

Examples

julia
kw = (; abc = (; def = "x"))\ngetpropertynested(kw, [:abc], :def)\n# Output: "x"

source


# PromptingTools.Experimental.RAGTools.hamming_distanceMethod.
julia
hamming_distance(\n    mat::AbstractMatrix{T}, query::AbstractVector{T})::Vector{Int} where {T <: Integer}

Calculates the column-wise Hamming distance between a matrix of binary vectors mat and a single binary vector vect.

This is the first-pass ranking for BinaryCosineSimilarity method.

Implementation from domluna's tinyRAG.
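Example

A minimal sketch with random binary embeddings (Bool is a subtype of Integer, so it matches the signature).

julia
mat = rand(Bool, 128, 1000)   # 1000 binary embeddings, one per column (dummy data)
query = rand(Bool, 128)
dists = hamming_distance(mat, query)   # Vector{Int} with one distance per column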

source


# PromptingTools.Experimental.RAGTools.hcat_truncateMethod.
julia
hcat_truncate(matrices::AbstractVector{<:AbstractMatrix{T}},\n    truncate_dimension::Union{Nothing, Int} = nothing; verbose::Bool = false) where {T <:\n                                                                                     Real}

Horizontal concatenation of matrices, with optional truncation of the rows of each matrix to the specified dimension (reducing embedding dimensionality).

More efficient than simple splatting, as the resulting matrix is pre-allocated in one go.

Returns: a Matrix{Float32}

Arguments

Examples

julia
a = rand(Float32, 1000, 10)\nb = rand(Float32, 1000, 20)\n\nc = hcat_truncate([a, b])\nsize(c) # (1000, 30)\n\nd = hcat_truncate([a, b], 500)\nsize(d) # (500, 30)

source


# PromptingTools.Experimental.RAGTools.load_textMethod.
julia
load_text(chunker::AbstractChunker, input;\n    kwargs...)

Load text from input using the provided chunker. Called by get_chunks.

Available chunkers:

source


# PromptingTools.Experimental.RAGTools.merge_kwargs_nestedMethod.
julia
merge_kwargs_nested(nt1::NamedTuple, nt2::NamedTuple)

Merges two nested NamedTuples nt1 and nt2 recursively. The nt2 values will overwrite the nt1 values when overlapping.

Example

julia
kw = (; abc = (; def = "x"))\nkw2 = (; abc = (; def = "x", def2 = 2), new = 1)\nmerge_kwargs_nested(kw, kw2)

source


# PromptingTools.Experimental.RAGTools.pack_bitsMethod.
julia
pack_bits(arr::AbstractMatrix{<:Bool}) -> Matrix{UInt64}\npack_bits(vect::AbstractVector{<:Bool}) -> Vector{UInt64}

Pack a matrix or vector of boolean values into a more compact representation using UInt64.

Arguments (Input)

Returns

Examples

For vectors:

julia
bin = rand(Bool, 128)\nbinint = pack_bits(bin)\nbinx = unpack_bits(binint)\n@assert bin == binx

For matrices:

julia
bin = rand(Bool, 128, 10)\nbinint = pack_bits(bin)\nbinx = unpack_bits(binint)\n@assert bin == binx

source


# PromptingTools.Experimental.RAGTools.permutation_step!Method.
julia
permutation_step!(\n    result::RankGPTResult; rank_start::Integer = 1, rank_end::Integer = 100, kwargs...)

One sub-step of the RankGPT algorithm: permutation ranking within the window of chunks defined by the rank_start and rank_end positions.

source


# PromptingTools.Experimental.RAGTools.preprocess_tokensFunction.
julia
preprocess_tokens(text::AbstractString, stemmer=nothing; stopwords::Union{Nothing,Set{String}}=nothing, min_length::Int=3)

Preprocess provided text by removing numbers, punctuation, and applying stemming for BM25 search index.

Returns a list of preprocessed tokens.

Example

julia
stemmer = Snowball.Stemmer("english")\nstopwords = Set(["a", "an", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "no", "not", "of", "on", "or", "such", "some", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"])\ntext = "This is a sample paragraph to test the functionality of your text preprocessor. It contains a mix of uppercase and lowercase letters, as well as punctuation marks such as commas, periods, and exclamation points! Let's see how your preprocessor handles quotes, like "this one", and also apostrophes, like in don't. Will it preserve the formatting of this paragraph, including the indentation and line breaks?"\npreprocess_tokens(text, stemmer; stopwords)

source


# PromptingTools.Experimental.RAGTools.print_htmlMethod.
julia
print_html([io::IO,] parent_node::AbstractAnnotatedNode)\n\nprint_html([io::IO,] rag::AbstractRAGResult; add_sources::Bool = false,\n    add_scores::Bool = false, default_styler = HTMLStyler(),\n    low_styler = HTMLStyler(styles = "color:magenta", classes = ""),\n    medium_styler = HTMLStyler(styles = "color:blue", classes = ""),\n    high_styler = HTMLStyler(styles = "", classes = ""), styler_kwargs...)

Pretty-prints the annotation parent_node (or RAGResult) to the io stream (or returns the string) in HTML format (assumes node is styled with styler HTMLStyler).

It wraps each "token" into a span with requested styling (HTMLStyler's properties classes and styles). It also replaces new lines with <br> for better HTML formatting.

For any non-HTML styler, it prints the content as plain text.

Returns

See also HTMLStyler, annotate_support, and set_node_style! for how the styling is applied and what the arguments mean.

Examples

Note: RT is an alias for PromptingTools.Experimental.RAGTools

Simple start directly with the RAGResult:

julia
# set up the text/RAGResult\ncontext = [\n    "This is a test context.", "Another context sentence.", "Final piece of context."]\nanswer = "This is a test answer. It has multiple sentences."\nrag = RT.RAGResult(; context, final_answer=answer, question="")\n\n# print the HTML\nprint_html(rag)

Low-level control by creating our AnnotatedNode:

julia
# prepare your HTML styling\nstyler_kwargs = (;\n    default_styler=RT.HTMLStyler(),\n    low_styler=RT.HTMLStyler(styles="color:magenta", classes=""),\n    medium_styler=RT.HTMLStyler(styles="color:blue", classes=""),\n    high_styler=RT.HTMLStyler(styles="", classes=""))\n\n# annotate the text\ncontext = [\n    "This is a test context.", "Another context sentence.", "Final piece of context."]\nanswer = "This is a test answer. It has multiple sentences."\n\nparent_node = RT.annotate_support(\n    RT.TrigramAnnotater(), answer, context; add_sources=false, add_scores=false, styler_kwargs...)\n\n# print the HTML\nprint_html(parent_node)\n\n# or to accumulate more nodes\nio = IOBuffer()\nprint_html(io, parent_node)

source


# PromptingTools.Experimental.RAGTools.rank_gptMethod.
julia
rank_gpt(chunks::AbstractVector{<:AbstractString}, question::AbstractString;\n    verbose::Int = 1, rank_start::Integer = 1, rank_end::Integer = 100,\n    window_size::Integer = 20, step::Integer = 10,\n    num_rounds::Integer = 1, model::String = "gpt4o", kwargs...)

Ranks the chunks based on their relevance for question. Returns the ranking permutation of the chunks in the order they are most relevant to the question (the first is the most relevant).

Example

julia
result = rank_gpt(chunks, question; rank_start=1, rank_end=25, window_size=8, step=4, num_rounds=3, model="gpt4o")

Reference

[1] Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agents by W. Sun et al. [2] RankGPT Github

source


# PromptingTools.Experimental.RAGTools.rank_sliding_window!Method.
julia
rank_sliding_window!(\n    result::RankGPTResult; verbose::Int = 1, rank_start = 1, rank_end = 100,\n    window_size = 20, step = 10, model::String = "gpt4o", kwargs...)

A single pass of the RankGPT algorithm: permutation ranking across all positions between rank_start and rank_end.

source


# PromptingTools.Experimental.RAGTools.receive_permutation!Method.
julia
receive_permutation!(\n    curr_rank::AbstractVector{<:Integer}, response::AbstractString;\n    rank_start::Integer = 1, rank_end::Integer = 100)

Extracts and heals the permutation to contain all ranking positions.

source


# PromptingTools.Experimental.RAGTools.reciprocal_rank_fusionMethod.
julia
reciprocal_rank_fusion(args...; k::Int=60)

Merges multiple rankings and calculates the reciprocal rank score for each chunk (discounted by the inverse of the rank).

Example

julia
positions1 = [1, 3, 5, 7, 9]\npositions2 = [2, 4, 6, 8, 10]\npositions3 = [2, 4, 6, 11, 12]\n\nmerged_positions, scores = reciprocal_rank_fusion(positions1, positions2, positions3)

source


# PromptingTools.Experimental.RAGTools.reciprocal_rank_fusionMethod.
julia
reciprocal_rank_fusion(\n    positions1::AbstractVector{<:Integer}, scores1::AbstractVector{<:T},\n    positions2::AbstractVector{<:Integer},\n    scores2::AbstractVector{<:T}; k::Int = 60) where {T <: Real}

Merges two sets of rankings and their joint scores. Calculates the reciprocal rank score for each chunk (discounted by the inverse of the rank).

Example

julia
positions1 = [1, 3, 5, 7, 9]\nscores1 = [0.9, 0.8, 0.7, 0.6, 0.5]\npositions2 = [2, 4, 6, 8, 10]\nscores2 = [0.5, 0.6, 0.7, 0.8, 0.9]\n\nmerged, scores = reciprocal_rank_fusion(positions1, scores1, positions2, scores2; k = 60)

source


# PromptingTools.Experimental.RAGTools.refine!Method.
julia
refine!(\n    refiner::NoRefiner, index::AbstractChunkIndex, result::AbstractRAGResult;\n    kwargs...)

Simple no-op function for refine!. It simply copies the result.answer and result.conversations[:answer] without any changes.

source


# PromptingTools.Experimental.RAGTools.refine!Method.
julia
refine!(\n    refiner::SimpleRefiner, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_CHAT,\n    template::Symbol = :RAGAnswerRefiner,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Give the model a chance to refine the answer (using the same or different context than previously provided).

This method uses the same context as the original answer, however, it can be modified to do additional retrieval and use a different context.

Returns

Arguments
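Example

Mirrors the TavilySearchRefiner example below: enable refinement by swapping the refiner in the pipeline config (index and question are assumed to exist).

julia
# the default generator uses NoRefiner; swap in SimpleRefiner to refine the answer
cfg = RAGConfig()
cfg.generator.refiner = SimpleRefiner()

result = airag(cfg, index; question, return_all = true)
pprint(result)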

source


# PromptingTools.Experimental.RAGTools.refine!Method.
julia
refine!(\n    refiner::TavilySearchRefiner, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_CHAT,\n    include_answer::Bool = true,\n    max_results::Integer = 5,\n    include_domains::AbstractVector{<:AbstractString} = String[],\n    exclude_domains::AbstractVector{<:AbstractString} = String[],\n    template::Symbol = :RAGWebSearchRefiner,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Refines the answer by executing a web search using the Tavily API. This method aims to enhance the answer's accuracy and relevance by incorporating information retrieved from the web.

Note: The web results and web answer (if requested) will be added to the context and sources!

Returns

Arguments

Example

julia
refiner!(TavilySearchRefiner(), index, result)\n# See result.final_answer or pprint(result)

To enable this refiner in a full RAG pipeline, simply swap the component in the config:

julia
cfg = RT.RAGConfig()\ncfg.generator.refiner = RT.TavilySearchRefiner()\n\nresult = airag(cfg, index; question, return_all = true)\npprint(result)

source


# PromptingTools.Experimental.RAGTools.rephraseMethod.
julia
rephrase(rephraser::SimpleRephraser, question::AbstractString;\n    verbose::Bool = true,\n    model::String = PT.MODEL_CHAT, template::Symbol = :RAGQueryHyDE,\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Rephrases the question using the provided rephraser (template = :RAGQueryHyDE).

A special flavor of rephrasing that uses the HyDE (Hypothetical Document Embedding) method: it aims to find the documents most similar to a synthetic passage that would be a good answer to our question.

Returns both the original and the rephrased question.

Arguments
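Example

A hedged sketch; that the return holds both the original question and the hypothetical-answer passage is taken from the description above, the exact container type is an assumption.

julia
rephraser = SimpleRephraser()
# returns the original question plus the hypothetical-answer passage used for embedding
questions = rephrase(rephraser, "How do I add a column to a DataFrame in Julia?";
    template = :RAGQueryHyDE)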

source


# PromptingTools.Experimental.RAGTools.rephraseMethod.
julia
rephrase(rephraser::NoRephraser, question::AbstractString; kwargs...)

No-op, simple passthrough.

source


# PromptingTools.Experimental.RAGTools.rephraseMethod.
julia
rephrase(rephraser::SimpleRephraser, question::AbstractString;\n    verbose::Bool = true,\n    model::String = PT.MODEL_CHAT, template::Symbol = :RAGQueryOptimizer,\n    cost_tracker = Threads.Atomic{Float64}(0.0), kwargs...)

Rephrases the question using the provided rephraser template.

Returns both the original and the rephrased question.

Arguments

source


# PromptingTools.Experimental.RAGTools.rerankMethod.
julia
rerank(\n    reranker::CohereReranker, index::AbstractDocumentIndex, question::AbstractString,\n    candidates::AbstractCandidateChunks;\n    verbose::Bool = false,\n    api_key::AbstractString = PT.COHERE_API_KEY,\n    top_n::Integer = length(candidates.scores),\n    model::AbstractString = "rerank-english-v3.0",\n    return_documents::Bool = false,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Re-ranks a list of candidate chunks using the Cohere Rerank API. See https://cohere.com/rerank for more details.

Arguments
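Example

A hedged sketch mirroring the RankGPTReranker example below; requires COHERE_API_KEY, and index and question are assumed to exist.

julia
# swap the reranker in the retriever to re-rank candidates via the Cohere API
cfg = RAGConfig(; retriever = SimpleRetriever(; reranker = CohereReranker()))
msg = airag(cfg, index; question, return_all = true)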

source


# PromptingTools.Experimental.RAGTools.rerankMethod.
julia
rerank(\n    reranker::RankGPTReranker, index::AbstractDocumentIndex, question::AbstractString,\n    candidates::AbstractCandidateChunks;\n    api_key::AbstractString = PT.OPENAI_API_KEY,\n    model::AbstractString = PT.MODEL_CHAT,\n    verbose::Bool = false,\n    top_n::Integer = length(candidates.scores),\n    unique_chunks::Bool = true,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Re-ranks a list of candidate chunks using the RankGPT algorithm. See https://github.com/sunnweiwei/RankGPT for more details.

It uses LLM calls to rank the candidate chunks.

Arguments

Examples

julia
index = <some index>\nquestion = "What are the best practices for parallel computing in Julia?"\n\ncfg = RAGConfig(; retriever = SimpleRetriever(; reranker = RT.RankGPTReranker()))\nmsg = airag(cfg, index; question, return_all = true)

To get full verbosity of logs, set verbose = 5 (anything higher than 3).

julia
msg = airag(cfg, index; question, return_all = true, verbose = 5)

Reference

[1] Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agents by W. Sun et al. [2] RankGPT Github

source


# PromptingTools.Experimental.RAGTools.retrieveMethod.
julia
retrieve(retriever::AbstractRetriever,\n    index::AbstractChunkIndex,\n    question::AbstractString;\n    verbose::Integer = 1,\n    top_k::Integer = 100,\n    top_n::Integer = 5,\n    api_kwargs::NamedTuple = NamedTuple(),\n    rephraser::AbstractRephraser = retriever.rephraser,\n    rephraser_kwargs::NamedTuple = NamedTuple(),\n    embedder::AbstractEmbedder = retriever.embedder,\n    embedder_kwargs::NamedTuple = NamedTuple(),\n    processor::AbstractProcessor = retriever.processor,\n    processor_kwargs::NamedTuple = NamedTuple(),\n    finder::AbstractSimilarityFinder = retriever.finder,\n    finder_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = retriever.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    filter::AbstractTagFilter = retriever.filter,\n    filter_kwargs::NamedTuple = NamedTuple(),\n    reranker::AbstractReranker = retriever.reranker,\n    reranker_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Retrieves the most relevant chunks from the index for the given question and returns them in the RAGResult object.

This is the main entry point for the retrieval stage of the RAG pipeline. It is often followed by generate! step.

Notes:

The arguments correspond to the steps of the retrieval process (rephrasing, embedding, finding similar docs, tagging, filtering by tags, reranking). You can customize each step by providing a new custom type that dispatches the corresponding function, eg, create your own type struct MyReranker<:AbstractReranker end and define the custom method for it rerank(::MyReranker,...) = ....

Note: Discover available retrieval sub-types for each step with subtypes(AbstractRephraser) and similar for other abstract types.

If you're using locally-hosted models, you can pass the api_kwargs with the url field set to the model's URL and make sure to provide corresponding model kwargs to rephraser, embedder, and tagger to use the custom models (they make AI calls).

Arguments

See also: SimpleRetriever, AdvancedRetriever, build_index, rephrase, get_embeddings, get_keywords, find_closest, get_tags, find_tags, rerank, RAGResult.

Examples

Find the 5 most relevant chunks from the index for the given question.

julia
# assumes you have an existing index `index`\nretriever = SimpleRetriever()\n\nresult = retrieve(retriever,\n    index,\n    "What is the capital of France?",\n    top_n = 5)\n\n# or use the default retriever (same as above)\nresult = retrieve(retriever,\n    index,\n    "What is the capital of France?",\n    top_n = 5)

Apply more advanced retrieval with question rephrasing and reranking (requires COHERE_API_KEY). We will obtain top 100 chunks from embeddings (top_k) and top 5 chunks from reranking (top_n).

julia
retriever = AdvancedRetriever()\n\nresult = retrieve(retriever, index, question; top_k=100, top_n=5)

You can use the retriever to customize your retrieval strategy or directly change the strategy types in the retrieve kwargs!

Example of using locally-hosted model hosted on localhost:8080:

julia
retriever = SimpleRetriever()\nresult = retrieve(retriever, index, question;\n    rephraser_kwargs = (; model = "custom"),\n    embedder_kwargs = (; model = "custom"),\n    tagger_kwargs = (; model = "custom"), api_kwargs = (;\n        url = "http://localhost:8080"))

source


# PromptingTools.Experimental.RAGTools.run_qa_evalsMethod.
julia
run_qa_evals(index::AbstractChunkIndex, qa_items::AbstractVector{<:QAEvalItem};\n    api_kwargs::NamedTuple = NamedTuple(),\n    airag_kwargs::NamedTuple = NamedTuple(),\n    qa_evals_kwargs::NamedTuple = NamedTuple(),\n    verbose::Bool = true, parameters_dict::Dict{Symbol, <:Any} = Dict{Symbol, Any}())

Evaluates a vector of QAEvalItems and returns a vector of QAEvalResults. This function assesses the relevance and accuracy of the answers generated in a QA evaluation context.

See ?run_qa_evals for more details.

Arguments

Returns

Vector{QAEvalResult}: Vector of evaluation results that includes various scores and metadata related to the QA evaluation.

Example

julia
index = "..." # Assuming a proper index is defined\nqa_items = [QAEvalItem(question="What is the capital of France?", answer="Paris", context="France is a country in Europe."),\n            QAEvalItem(question="What is the capital of Germany?", answer="Berlin", context="Germany is a country in Europe.")]\n\n# Let's run a test with `top_k=5`\nresults = run_qa_evals(index, qa_items; airag_kwargs=(;top_k=5), parameters_dict=Dict(:top_k => 5))\n\n# Filter out the "failed" calls\nresults = filter(x->!isnothing(x.answer_score), results);\n\n# See average judge score\nmean(x->x.answer_score, results)

source


# PromptingTools.Experimental.RAGTools.run_qa_evalsMethod.
julia
run_qa_evals(qa_item::QAEvalItem, ctx::RAGResult; verbose::Bool = true,\n             parameters_dict::Dict{Symbol, <:Any}, judge_template::Symbol = :RAGJudgeAnswerFromContext,\n             model_judge::AbstractString, api_kwargs::NamedTuple = NamedTuple()) -> QAEvalResult

Evaluates a single QAEvalItem using RAG details (RAGResult) and returns a QAEvalResult structure. This function assesses the relevance and accuracy of the answers generated in a QA evaluation context.

Arguments

Returns

QAEvalResult: An evaluation result that includes various scores and metadata related to the QA evaluation.

Notes

Examples

Evaluating a QA pair using a specific context and model:

julia
qa_item = QAEvalItem(question="What is the capital of France?", answer="Paris", context="France is a country in Europe.")\nctx = RAGResult(source="Wikipedia", context="France is a country in Europe.", answer="Paris")\nparameters_dict = Dict("param1" => "value1", "param2" => "value2")\n\neval_result = run_qa_evals(qa_item, ctx, parameters_dict=parameters_dict, model_judge="MyAIJudgeModel")

source


# PromptingTools.Experimental.RAGTools.score_retrieval_hitMethod.

Returns 1.0 if context overlaps or is contained within any of the candidate_context

source


# PromptingTools.Experimental.RAGTools.score_retrieval_rankMethod.

Returns Integer rank of the position where context overlaps or is contained within a candidate_context

source


# PromptingTools.Experimental.RAGTools.score_to_unit_scaleMethod.
julia
score_to_unit_scale(x::AbstractVector{T}) where T<:Real

Shift and scale a vector of scores to the unit scale [0, 1].

Example

julia
x = [1.0, 2.0, 3.0, 4.0, 5.0]\nscaled_x = score_to_unit_scale(x)

source


# PromptingTools.Experimental.RAGTools.set_node_style!Method.
julia
set_node_style!(::TrigramAnnotater, node::AnnotatedNode;\n    low_threshold::Float64 = 0.0, medium_threshold::Float64 = 0.5, high_threshold::Float64 = 1.0,\n    default_styler::AbstractAnnotationStyler = Styler(),\n    low_styler::AbstractAnnotationStyler = Styler(color = :magenta, bold = false),\n    medium_styler::AbstractAnnotationStyler = Styler(color = :blue, bold = false),\n    high_styler::AbstractAnnotationStyler = Styler(color = :nothing, bold = false),\n    bold_multihits::Bool = false)

Sets style of node based on the provided rules

source


# PromptingTools.Experimental.RAGTools.setpropertynestedMethod.
julia
setpropertynested(nt::NamedTuple, parent_keys::Vector{Symbol},\n    key::Symbol,\n    value

)

Setter for a property key in a nested NamedTuple nt, where the property is nested to a key in parent_keys.

Useful for nested kwargs where we want to change some property in parent_keys subset (eg, model in retriever_kwargs).

Examples

julia
kw = (; abc = (; def = "x"))\nsetpropertynested(kw, [:abc], :def, "y")\n# Output: (abc = (def = "y",),)

Practical example of changing all model keys in CHAT-based steps in the pipeline:

julia
# changes :model to "gpt4t" whenever the parent key is in the below list (chat-based steps)\nsetpropertynested(kwargs,\n    [:rephraser_kwargs, :tagger_kwargs, :answerer_kwargs, :refiner_kwargs],\n    :model, "gpt4t")

Or changing an embedding model (across both indexer and retriever steps, because it's same step name):

julia
kwargs = setpropertynested(\n        kwargs, [:embedder_kwargs],\n        :model, "text-embedding-3-large"\n    )

source


# PromptingTools.Experimental.RAGTools.split_into_code_and_sentencesMethod.
julia
split_into_code_and_sentences(input::Union{String, SubString{String}})

Splits a text block into code or text and sub-splits it into units.

If it's a code block, it splits by newline but keeps the group_id the same (to share the same source). If it's a text block, it splits into sentences, bullets, etc., and assigns a different group_id (to have different sources).

source


# PromptingTools.Experimental.RAGTools.tags_extractMethod.
julia
tags_extract(item::Tag)\ntags_extract(tags::Vector{Tag})

Extracts the Tag item into a string of the form category:::value (lowercased and spaces replaced with underscores).

Example

julia
msg = aiextract(:RAGExtractMetadataShort; return_type=MaybeTags, text="I like package DataFrames", instructions="None.")\nmetadata = tags_extract(msg.content.items)

source


# PromptingTools.Experimental.RAGTools.token_with_boundariesMethod.
julia
token_with_boundaries(\n    prev_token::Union{Nothing, AbstractString}, curr_token::AbstractString,\n    next_token::Union{Nothing, AbstractString})

Joins the three tokens together. Useful for adding boundary tokens (like spaces vs brackets) to the curr_token to improve the matched context (ie, to separate partial matches from exact matches).

source


# PromptingTools.Experimental.RAGTools.tokenizeMethod.
julia
tokenize(input::Union{String, SubString{String}})

Tokenizes provided input by spaces, special characters or Julia symbols (eg, =>).

Unlike other tokenizers, it aims to be lossless, ie, it keeps both the separated text and the separators.
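
Example

A minimal sketch; the exact token boundaries are an implementation detail and not asserted here.

julia
tokens = tokenize("model = \"gpt-4o\" # set the model")
# separators (spaces, quotes, `=`) are kept as tokens, so the input can be reassembled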

source


# PromptingTools.Experimental.RAGTools.translate_positions_to_parentMethod.
julia
translate_positions_to_parent(index::AbstractChunkIndex, positions::AbstractVector{<:Integer})

Translate positions to the parent index. Useful to convert between positions in a view and the original index.

Used whenever a chunkdata() is used to re-align positions in case index is a view.

source


# PromptingTools.Experimental.RAGTools.translate_positions_to_parentMethod.
julia
translate_positions_to_parent(\n    index::SubChunkIndex, pos::AbstractVector{<:Integer})

Translate positions to the parent index. Useful to convert between positions in a view and the original index.

Used whenever a chunkdata() or tags() are used to re-align positions to the "parent" index.
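Example

A hedged sketch; the mapping shown in the comment assumes the view was created from positions 2 and 4 of the parent index.

julia
cc = CandidateChunks(index.id, [2, 4], [0.9, 0.8])
sub_index = @view(index[cc])

# positions 1:2 in the view are assumed to map back to positions [2, 4] in the parent
translate_positions_to_parent(sub_index, [1, 2])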

source


# PromptingTools.Experimental.RAGTools.trigram_support!Method.
julia
trigram_support!(parent_node::AnnotatedNode,\n    context_trigrams::AbstractVector, trigram_func::F1 = trigrams, token_transform::F2 = identity;\n    skip_trigrams::Bool = false, min_score::Float64 = 0.5,\n    min_source_score::Float64 = 0.25,\n    stop_words::AbstractVector{<:String} = STOPWORDS,\n    styler_kwargs...) where {F1 <: Function, F2 <: Function}

Find if the parent_node.content is supported by the provided context_trigrams.

Logic:

For diagnostics, you can use AbstractTrees.print_tree(parent_node) to see the tree structure of each token and its score.

Example

julia
node = AnnotatedNode(content = "xyz")\ntrigram_support!(node, context_trigrams) # updates node.children!

source


# PromptingTools.Experimental.RAGTools.trigramsMethod.
julia
trigrams(input_string::AbstractString; add_word::AbstractString = "")

Splits provided input_string into a vector of trigrams (combination of three consecutive characters found in the input_string).

If add_word is provided, it is added to the resulting array. Useful to add the full word itself to the resulting array for exact match.

source


# PromptingTools.Experimental.RAGTools.trigrams_hashedMethod.
julia
trigrams_hashed(input_string::AbstractString; add_word::AbstractString = "")

Splits provided input_string into a Set of hashed trigrams (combination of three consecutive characters found in the input_string).

It is more efficient for lookups in large strings (eg, >100K characters).

If add_word is provided, it is added to the resulting array to hash. Useful to add the full word itself to the resulting array for exact match.
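
Example

A hedged sketch; the elements are hashes (integers), not strings:

julia
trigrams_hashed("spark")                       # Set of hashed trigrams, eg, Set{UInt64}
trigrams_hashed("spark"; add_word = "spark")   # also includes the hash of the full word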

source


# PromptingTools.last_messageMethod.
julia
PT.last_message(result::RAGResult)

Extract the last message from the RAGResult. It looks for final_answer first, then answer fields in the conversations dictionary. Returns nothing if not found.
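
Example

A hedged sketch, assuming you already have an index and request the full RAGResult via return_all = true:

julia
result = airag(RAGConfig(), index; question = "What is RAG?", return_all = true)
msg = PT.last_message(result)   # the last message behind final_answer (or answer), or nothing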

source


# PromptingTools.last_outputMethod.

Extracts the last output (generated text answer) from the RAGResult.

source


# PromptingTools.pprintMethod.
julia
PromptingTools.pprint(\n    io::IO, node::AbstractAnnotatedNode;\n    text_width::Int = displaysize(io)[2], add_newline::Bool = true)

Pretty print the node to the io stream, including all its children

Supports only node.style::Styler for now.

source


# PromptingTools.pprintMethod.
julia
PT.pprint(\n    io::IO, r::AbstractRAGResult; add_context::Bool = false,\n    text_width::Int = displaysize(io)[2], annotater_kwargs...)

Pretty print the RAG result r to the given io stream.

If add_context is true, the context will be printed as well. The text_width parameter can be used to control the width of the output.

You can provide additional keyword arguments to the annotater, eg, add_sources, add_scores, min_score, etc. See annotate_support for more details.
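
Example

A hedged sketch, assuming result is a RAGResult obtained from airag with return_all = true:

julia
PT.pprint(result)                                                 # annotated final answer only
PT.pprint(stdout, result; add_context = true, text_width = 100)   # also print the context snippets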

source


', 284) ])); } const reference_ragtools = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render]]);
diff --git a/previews/PR218/assets/reference_ragtools.md.gYCqDEbn.js b/previews/PR218/assets/reference_ragtools.md.Bby7eP61.lean.js
similarity index 93%
rename from previews/PR218/assets/reference_ragtools.md.gYCqDEbn.js
rename to previews/PR218/assets/reference_ragtools.md.Bby7eP61.lean.js
index 0b8c465d3..2545f9b0d 100644
--- a/previews/PR218/assets/reference_ragtools.md.gYCqDEbn.js
+++ b/previews/PR218/assets/reference_ragtools.md.Bby7eP61.lean.js
@@ -3,7 +3,7 @@ const __pageData = JSON.parse('{"title":"Reference for RAGTools","description":"
 const _sfc_main = { name: "reference_ragtools.md" };
 function _sfc_render(_ctx, _cache, $props, $setup, $data, $options) {
   return openBlock(), createElementBlock("div", null, _cache[0] || (_cache[0] = [
-    createStaticVNode('

Reference for RAGTools

# PromptingTools.Experimental.RAGToolsModule.
julia
RAGTools

Provides Retrieval-Augmented Generation (RAG) functionality.

Requires: LinearAlgebra, SparseArrays, Unicode, PromptingTools for proper functionality.

This module is experimental and may change at any time. It is intended to be moved to a separate package in the future.

source


# PromptingTools.Experimental.RAGTools.AbstractCandidateChunksType.
julia
AbstractCandidateChunks

Abstract type for storing candidate chunks, ie, references to items in a AbstractChunkIndex.

Return type from find_closest and find_tags functions.

Required Fields

source


# PromptingTools.Experimental.RAGTools.AbstractChunkIndexType.
julia
AbstractChunkIndex <: AbstractDocumentIndex

Main abstract type for storing document chunks and their embeddings. It also stores tags and sources for each chunk.

Required Fields

source


# PromptingTools.Experimental.RAGTools.AbstractGeneratorType.
julia
AbstractGenerator <: AbstractGenerationMethod

Abstract type for generating an answer with generate! (use to change the process / return type of generate).

Required Fields

source


# PromptingTools.Experimental.RAGTools.AbstractIndexBuilderType.
julia
AbstractIndexBuilder

Abstract type for building an index with build_index (use to change the process / return type of build_index).

Required Fields

source


# PromptingTools.Experimental.RAGTools.AbstractMultiIndexType.
julia
AbstractMultiIndex <: AbstractDocumentIndex

Experimental abstract type for storing multiple document indexes. Not yet implemented.

source


# PromptingTools.Experimental.RAGTools.AbstractRetrieverType.
julia
AbstractRetriever <: AbstractRetrievalMethod

Abstract type for retrieving chunks from an index with retrieve (use to change the process / return type of retrieve).

Required Fields

source


# PromptingTools.Experimental.RAGTools.AdvancedGeneratorType.
julia
AdvancedGenerator <: AbstractGenerator

Advanced implementation for generate!. It enumerates context snippets, runs aigenerate and then refines the answer in an additional step (unlike SimpleGenerator).

It uses ContextEnumerator, SimpleAnswerer, SimpleRefiner, and NoPostprocessor as default contexter, answerer, refiner, and postprocessor.

source


# PromptingTools.Experimental.RAGTools.AdvancedRetrieverType.
julia
AdvancedRetriever <: AbstractRetriever

Dispatch for retrieve with advanced retrieval methods to improve result quality. Compared to SimpleRetriever, it adds rephrasing the query and reranking the results.

Fields

source


# PromptingTools.Experimental.RAGTools.AllTagFilterType.
julia
AllTagFilter <: AbstractTagFilter

Finds the chunks that have ALL OF the specified tag(s). A method for find_tags.

source


# PromptingTools.Experimental.RAGTools.AnnotatedNodeType.
julia
AnnotatedNode{T}  <: AbstractAnnotatedNode

A node to add annotations to the generated answer in airag

Annotations can be: sources, scores, whether it's supported or not by the context, etc.

Fields

source


# PromptingTools.Experimental.RAGTools.AnyTagFilterType.
julia
AnyTagFilter <: AbstractTagFilter

Finds the chunks that have ANY OF the specified tag(s). A method for find_tags.

source


# PromptingTools.Experimental.RAGTools.BM25SimilarityType.
julia
BM25Similarity <: AbstractSimilarityFinder

Finds the closest chunks to a query embedding by measuring the BM25 similarity between the query and the chunks' embeddings in binary form. A method for find_closest.

Reference: Wikipedia: BM25. Implementation follows: The Next Generation of Lucene Relevance.

source


# PromptingTools.Experimental.RAGTools.BatchEmbedderType.
julia
BatchEmbedder <: AbstractEmbedder

Default embedder for get_embeddings functions. It passes individual documents to be embedded in chunks to aiembed.

source


# PromptingTools.Experimental.RAGTools.BinaryBatchEmbedderType.
julia
BinaryBatchEmbedder <: AbstractEmbedder

Same as BatchEmbedder but reduces the embeddings matrix to a binary form (eg, BitMatrix). Defines a method for get_embeddings.

Reference: HuggingFace: Embedding Quantization.
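
Example

A hedged sketch of swapping in this embedder when building an index (texts and sources are assumed to exist):

julia
indexer = SimpleIndexer(; embedder = BinaryBatchEmbedder())
index = build_index(indexer, texts; chunker_kwargs = (; sources))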

source


# PromptingTools.Experimental.RAGTools.BinaryCosineSimilarityType.
julia
BinaryCosineSimilarity <: AbstractSimilarityFinder

Finds the closest chunks to a query embedding by measuring the Hamming distance AND cosine similarity between the query and the chunks' embeddings in binary form. A method for find_closest.

It follows the two-pass approach:

Reference: HuggingFace: Embedding Quantization.

source


# PromptingTools.Experimental.RAGTools.BitPackedBatchEmbedderType.
julia
BitPackedBatchEmbedder <: AbstractEmbedder

Same as BatchEmbedder but reduces the embeddings matrix to a binary form packed in UInt64 (eg, BitMatrix.chunks). Defines a method for get_embeddings.

See also utilities pack_bits and unpack_bits to move between packed/non-packed binary forms.

Reference: HuggingFace: Embedding Quantization.

source


# PromptingTools.Experimental.RAGTools.BitPackedCosineSimilarityType.
julia
BitPackedCosineSimilarity <: AbstractSimilarityFinder

Finds the closest chunks to a query embedding by measuring the Hamming distance AND cosine similarity between the query and the chunks' embeddings in binary form. A method for find_closest.

The difference to BinaryCosineSimilarity is that the binary values are packed into UInt64, which is more efficient.

Reference: HuggingFace: Embedding Quantization. Implementation of hamming_distance is based on TinyRAG.
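
Example

A hedged sketch of pairing this finder with a bit-packed index (ie, one built with BitPackedBatchEmbedder):

julia
retriever = SimpleRetriever(; finder = BitPackedCosineSimilarity())
cfg = RAGConfig(; retriever)
result = airag(cfg, index; question = "What is RAG?", return_all = true)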

source


# PromptingTools.Experimental.RAGTools.CandidateChunksType.
julia
CandidateChunks

A struct for storing references to chunks in the given index (identified by index_id): positions of the chunks and scores holding the strength of similarity (=1 is the highest, ie, most similar). It's the result of the retrieval stage of RAG.

Fields

source


# PromptingTools.Experimental.RAGTools.ChunkEmbeddingsIndexType.
julia
ChunkEmbeddingsIndex

Main struct for storing document chunks and their embeddings. It also stores tags and sources for each chunk.

Previously, this struct was called ChunkIndex.

Fields

source


# PromptingTools.Experimental.RAGTools.ChunkKeywordsIndexType.
julia
ChunkKeywordsIndex

Struct for storing chunks of text and associated keywords for BM25 similarity search.

Fields

Example

We can easily create a keywords-based index from a standard embeddings-based index.

julia
\n# Let's assume we have a standard embeddings-based index\nindex = build_index(SimpleIndexer(), texts; chunker_kwargs = (; max_length=10))\n\n# Creating an additional index for keyword-based search (BM25), is as simple as\nindex_keywords = ChunkKeywordsIndex(index)\n\n# We can immediately create a MultiIndex (a hybrid index holding both indices)\nmulti_index = MultiIndex([index, index_keywords])

You can also build the index via build_index

julia
# given some sentences and sources\nindex_keywords = build_index(KeywordsIndexer(), sentences; chunker_kwargs=(; sources))\n\n# Retrive closest chunks with\nretriever = SimpleBM25Retriever()\nresult = retrieve(retriever, index_keywords, "What are the best practices for parallel computing in Julia?")\nresult.context

If you want to use airag, don't forget to specify the config to make sure keywords are processed (ie, tokenized) and that BM25 is used for searching candidates

julia
cfg = RAGConfig(; retriever = SimpleBM25Retriever());\nairag(cfg, index_keywords;\n    question = "What are the best practices for parallel computing in Julia?")

source


# PromptingTools.Experimental.RAGTools.ChunkKeywordsIndexMethod.
julia
ChunkKeywordsIndex(\n    [processor::AbstractProcessor=KeywordsProcessor(),] index::ChunkEmbeddingsIndex; verbose::Int = 1,\n    index_id = gensym("ChunkKeywordsIndex"), processor_kwargs...)

Convenience method to quickly create a ChunkKeywordsIndex from an existing ChunkEmbeddingsIndex.

Example

julia
\n# Let's assume we have a standard embeddings-based index\nindex = build_index(SimpleIndexer(), texts; chunker_kwargs = (; max_length=10))\n\n# Creating an additional index for keyword-based search (BM25), is as simple as\nindex_keywords = ChunkKeywordsIndex(index)\n\n# We can immediately create a MultiIndex (a hybrid index holding both indices)\nmulti_index = MultiIndex([index, index_keywords])

source


# PromptingTools.Experimental.RAGTools.CohereRerankerType.
julia
CohereReranker <: AbstractReranker

Rerank strategy using the Cohere Rerank API. Requires an API key. A method for rerank.

source


# PromptingTools.Experimental.RAGTools.ContextEnumeratorType.
julia
ContextEnumerator <: AbstractContextBuilder

Default method for the build_context! method. It simply enumerates the context snippets around each position in candidates. When possible, it will add surrounding chunks (from the same source).

source


# PromptingTools.Experimental.RAGTools.CosineSimilarityType.
julia
CosineSimilarity <: AbstractSimilarityFinder

Finds the closest chunks to a query embedding by measuring the cosine similarity between the query and the chunks' embeddings. A method for find_closest (see the docstring for more details and usage example).

source


# PromptingTools.Experimental.RAGTools.DocumentTermMatrixType.
julia
DocumentTermMatrix{T<:AbstractString}

A sparse matrix of term frequencies and document lengths to allow calculation of BM25 similarity scores.

source


# PromptingTools.Experimental.RAGTools.FileChunkerType.
julia
FileChunker <: AbstractChunker

Chunker when you provide file paths to get_chunks functions.

Ie, the inputs will be validated first (eg, file exists, etc) and then read into memory.

Set as default chunker in get_chunks functions.

source


# PromptingTools.Experimental.RAGTools.FlashRankerType.
julia
FlashRanker <: AbstractReranker

Rerank strategy using the package FlashRank.jl and local models. A method for rerank.

You must first import the FlashRank.jl package. To automatically download any required models, set your ENV["DATADEPS_ALWAYS_ACCEPT"] = true (see DataDeps for more details).

Example

julia
using FlashRank\n\n# Wrap the model to be a valid Ranker recognized by RAGTools\n# It will be provided to the airag/rerank function to avoid instantiating it on every call\nreranker = FlashRank.RankerModel(:mini) |> FlashRanker\n# You can choose :tiny or :mini\n\n## Apply to the pipeline configuration, eg, \ncfg = RAGConfig(; retriever = AdvancedRetriever(; reranker))\n\n# Ask a question (assumes you have some `index`)\nquestion = "What are the best practices for parallel computing in Julia?"\nresult = airag(cfg, index; question, return_all = true)

source


# PromptingTools.Experimental.RAGTools.HTMLStylerType.
julia
HTMLStyler

Defines styling via classes (attribute class) and styles (attribute style) for HTML formatting of AbstractAnnotatedNode

source


# PromptingTools.Experimental.RAGTools.HyDERephraserType.
julia
HyDERephraser <: AbstractRephraser

Rephraser implemented using the provided AI Template (eg, ...) and standard chat model. A method for rephrase.

It uses a prompt-based rephrasing method called HyDE (Hypothetical Document Embedding), where instead of looking for an embedding of the question, we look for the documents most similar to a synthetic passage that would be a good answer to our question.

Reference: Arxiv paper.
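
Example

A hedged sketch of enabling HyDE rephrasing in the retrieval step (index is assumed to exist):

julia
retriever = AdvancedRetriever(; rephraser = HyDERephraser())
result = retrieve(retriever, index, "How do I run Julia code in parallel?")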

source


# PromptingTools.Experimental.RAGTools.JudgeAllScoresType.

final_rating is the average of all scoring criteria. Explain the final_rating in rationale

source


# PromptingTools.Experimental.RAGTools.JudgeRatingType.

Provide the final_rating between 1-5. Provide the rationale for it.

source


# PromptingTools.Experimental.RAGTools.KeywordsIndexerType.
julia
KeywordsIndexer <: AbstractIndexBuilder

Keyword-based index (BM25) to be returned by build_index.

It uses TextChunker, KeywordsProcessor, and NoTagger as default chunker, processor, and tagger.

source


# PromptingTools.Experimental.RAGTools.KeywordsProcessorType.
julia
KeywordsProcessor <: AbstractProcessor

Default keywords processor for get_keywords functions. It normalizes the documents, tokenizes them and builds a DocumentTermMatrix.

source


# PromptingTools.Experimental.RAGTools.MultiCandidateChunksType.
julia
MultiCandidateChunks

A struct for storing references to multiple sets of chunks across different indices. Each set of chunks is identified by an index_id in index_ids, with corresponding positions in the index and scores indicating the strength of similarity.

This struct is useful for scenarios where candidates are drawn from multiple indices, and there is a need to keep track of which candidates came from which index.

Fields

source


# PromptingTools.Experimental.RAGTools.MultiFinderType.
julia
MultiFinder <: AbstractSimilarityFinder

Composite finder for MultiIndex where we want to set multiple finders for each index. A method for find_closest. Positions correspond to indexes(::MultiIndex).

source


# PromptingTools.Experimental.RAGTools.MultiIndexType.
julia
MultiIndex

Composite index that stores multiple ChunkIndex objects and their embeddings.

Fields

Use the accessor indexes to access the individual indexes.

Examples

We can create a MultiIndex from a vector of AbstractChunkIndex objects.

julia
index = build_index(SimpleIndexer(), texts; chunker_kwargs = (; sources))\nindex_keywords = ChunkKeywordsIndex(index) # same chunks as above but adds BM25 instead of embeddings\n\nmulti_index = MultiIndex([index, index_keywords])

To use airag with different types of indices, we need to specify how to find the closest items for each index

julia
# Cosine similarity for embeddings and BM25 for keywords, same order as indexes in MultiIndex\nfinder = RT.MultiFinder([RT.CosineSimilarity(), RT.BM25Similarity()])\n\n# Notice that we add `processor` to make sure keywords are processed (ie, tokenized) as well\ncfg = RAGConfig(; retriever = SimpleRetriever(; processor = RT.KeywordsProcessor(), finder))\n\n# Ask questions\nmsg = airag(cfg, multi_index; question = "What are the best practices for parallel computing in Julia?")\npprint(msg) # prettify the answer

source


# PromptingTools.Experimental.RAGTools.NoEmbedderType.
julia
NoEmbedder <: AbstractEmbedder

No-op embedder for get_embeddings functions. It returns nothing.

source


# PromptingTools.Experimental.RAGTools.NoPostprocessorType.
julia
NoPostprocessor <: AbstractPostprocessor

Default method for postprocess! method. A passthrough option that returns the result without any changes.

Overload this method to add custom postprocessing steps, eg, logging, saving conversations to disk, etc.

source


# PromptingTools.Experimental.RAGTools.NoProcessorType.
julia
NoProcessor <: AbstractProcessor

No-op processor for get_keywords functions. It returns the inputs as is.

source


# PromptingTools.Experimental.RAGTools.NoRefinerType.
julia
NoRefiner <: AbstractRefiner

Default method for refine! method. A passthrough option that returns the result.answer without any changes.

source


# PromptingTools.Experimental.RAGTools.NoRephraserType.
julia
NoRephraser <: AbstractRephraser

No-op implementation for rephrase, which simply passes the question through.

source


# PromptingTools.Experimental.RAGTools.NoRerankerType.
julia
NoReranker <: AbstractReranker

No-op implementation for rerank, which simply passes the candidate chunks through.

source


# PromptingTools.Experimental.RAGTools.NoTagFilterType.
julia
NoTagFilter <: AbstractTagFilter

No-op implementation for find_tags, which simply returns all chunks.

source


# PromptingTools.Experimental.RAGTools.NoTaggerType.
julia
NoTagger <: AbstractTagger

No-op tagger for get_tags functions. It returns (nothing, nothing).

source


# PromptingTools.Experimental.RAGTools.OpenTaggerType.
julia
OpenTagger <: AbstractTagger

Tagger for get_tags functions, which generates possible tags for each chunk via aiextract. You can customize it via prompt template (default: :RAGExtractMetadataShort), but it's quite open-ended (ie, AI decides the possible tags).

source


# PromptingTools.Experimental.RAGTools.PassthroughTaggerType.
julia
PassthroughTagger <: AbstractTagger

Tagger for get_tags functions, which passes tags directly as Vector of Vectors of strings (ie, tags[i] is the tags for docs[i]).
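
Example

A hedged sketch; the tags must be aligned with the documents you pass in:

julia
docs = ["chunk about Julia", "chunk about statistics"]
tags = [["julia"], ["statistics"]]        # tags[i] belongs to docs[i]
get_tags(PassthroughTagger(), docs; tags)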

source


# PromptingTools.Experimental.RAGTools.RAGConfigType.
julia
RAGConfig <: AbstractRAGConfig

Default configuration for RAG. It uses SimpleIndexer, SimpleRetriever, and SimpleGenerator as default components. Provided as the first argument in airag.

To customize the components, replace corresponding fields for each step of the RAG pipeline (eg, use subtypes(AbstractIndexBuilder) to find the available options).

source


# PromptingTools.Experimental.RAGTools.RAGResultType.
julia
RAGResult

A struct for debugging RAG answers. It contains the question, answer, context, and the candidate chunks at each step of the RAG pipeline.

Think of the flow as question -> rephrased_questions -> answer -> final_answer with the context and candidate chunks helping along the way.

Fields

See also: pprint (pretty printing), annotate_support (for annotating the answer)

source


# PromptingTools.Experimental.RAGTools.RankGPTRerankerType.
julia
RankGPTReranker <: AbstractReranker

Rerank strategy using the RankGPT algorithm (calling LLMs). A method for rerank.

Reference

[1] Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agents by W. Sun et al. [2] RankGPT Github

source


# PromptingTools.Experimental.RAGTools.RankGPTResultType.
julia
RankGPTResult

Results from the RankGPT algorithm.

Fields

source


# PromptingTools.Experimental.RAGTools.SimpleAnswererType.
julia
SimpleAnswerer <: AbstractAnswerer

Default method for answer! method. Generates an answer using the aigenerate function with the provided context and question.

source


# PromptingTools.Experimental.RAGTools.SimpleBM25RetrieverType.
julia
SimpleBM25Retriever <: AbstractRetriever

Keyword-based implementation for retrieve. It does a simple similarity search via BM25Similarity and returns the results.

Make sure to use consistent processor and tagger with the Preparation Stage (build_index)!

Fields

source


# PromptingTools.Experimental.RAGTools.SimpleGeneratorType.
julia
SimpleGenerator <: AbstractGenerator

Default implementation for generate. It simply enumerates context snippets and runs aigenerate (no refinement).

It uses ContextEnumerator, SimpleAnswerer, NoRefiner, and NoPostprocessor as default contexter, answerer, refiner, and postprocessor.

source


# PromptingTools.Experimental.RAGTools.SimpleIndexerType.
julia
SimpleIndexer <: AbstractIndexBuilder

Default implementation for build_index.

It uses TextChunker, BatchEmbedder, and NoTagger as default chunker, embedder, and tagger.

source


# PromptingTools.Experimental.RAGTools.SimpleRefinerType.
julia
SimpleRefiner <: AbstractRefiner

Refines the answer using the same context previously provided via the provided prompt template. A method for refine!.

source


# PromptingTools.Experimental.RAGTools.SimpleRephraserType.
julia
SimpleRephraser <: AbstractRephraser

Rephraser implemented using the provided AI Template (eg, ...) and standard chat model. A method for rephrase.

source


# PromptingTools.Experimental.RAGTools.SimpleRetrieverType.
julia
SimpleRetriever <: AbstractRetriever

Default implementation for retrieve function. It does a simple similarity search via CosineSimilarity and returns the results.

Make sure to use consistent embedder and tagger with the Preparation Stage (build_index)!

Fields

source


# PromptingTools.Experimental.RAGTools.StylerType.
julia
Styler

Defines styling keywords for printstyled for each AbstractAnnotatedNode

source


# PromptingTools.Experimental.RAGTools.SubChunkIndexType.
julia
SubChunkIndex

A view of the parent index with respect to the chunks (and chunk-aligned fields). All methods and accessors working for AbstractChunkIndex also work for SubChunkIndex. It does not yet work for MultiIndex.

Fields

Example

julia
cc = CandidateChunks(index.id, 1:10)\nsub_index = @view(index[cc])

You can use SubChunkIndex to access chunks or sources (and other fields) from a parent index, eg,

julia
RT.chunks(sub_index)\nRT.sources(sub_index)\nRT.chunkdata(sub_index) # slice of embeddings\nRT.embeddings(sub_index) # slice of embeddings\nRT.tags(sub_index) # slice of tags\nRT.tags_vocab(sub_index) # unchanged, identical to parent version\nRT.extras(sub_index) # slice of extras

Access the parent index that the positions correspond to

julia
parent(sub_index)\nRT.positions(sub_index)

source


# PromptingTools.Experimental.RAGTools.SubDocumentTermMatrixType.

A partial view of a DocumentTermMatrix, tf is MATERIALIZED for performance and fewer allocations.

source


# PromptingTools.Experimental.RAGTools.TavilySearchRefinerType.
julia
TavilySearchRefiner <: AbstractRefiner

Refines the answer by executing a web search using the Tavily API. This method aims to enhance the answer's accuracy and relevance by incorporating information retrieved from the web. A method for refine!.

source


# PromptingTools.Experimental.RAGTools.TextChunkerType.
julia
TextChunker <: AbstractChunker

Chunker when you provide text to get_chunks functions. Inputs are directly chunked

source


# PromptingTools.Experimental.RAGTools.TrigramAnnotaterType.
julia
TrigramAnnotater

Annotation method where we score answer versus each context based on word-level trigrams that match.

It's a very simple method (and it can lose some semantic meaning in longer sequences, eg, negations), but it works reasonably well for both text and code.

source


# PromptingTools.Experimental.RAGTools._normalizeFunction.

Shortcut to LinearAlgebra.normalize. Provided in the package extension RAGToolsExperimentalExt (Requires SparseArrays, Unicode, and LinearAlgebra)

source


# PromptingTools.Experimental.RAGTools.add_node_metadata!Method.
julia
add_node_metadata!(annotater::TrigramAnnotater,\n    root::AnnotatedNode; add_sources::Bool = true, add_scores::Bool = true,\n    sources::Union{Nothing, AbstractVector{<:AbstractString}} = nothing)

Adds metadata to the children of root. Metadata includes sources and scores, if requested.

Optionally, it can add a list of sources at the end of the printed text.

The metadata is added by inserting new nodes in the root children list (with no children of its own to be printed out).

source


# PromptingTools.Experimental.RAGTools.airagMethod.
julia
airag(cfg::AbstractRAGConfig, index::AbstractDocumentIndex;\n    question::AbstractString,\n    verbose::Integer = 1, return_all::Bool = false,\n    api_kwargs::NamedTuple = NamedTuple(),\n    retriever::AbstractRetriever = cfg.retriever,\n    retriever_kwargs::NamedTuple = NamedTuple(),\n    generator::AbstractGenerator = cfg.generator,\n    generator_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

High-level wrapper for Retrieval-Augmented Generation (RAG), it combines together the retrieve and generate! steps which you can customize if needed.

The simplest version first finds the relevant chunks in index for the question and then sends these chunks to the AI model to help with generating a response to the question.

To customize the components, replace the types (retriever, generator) of the corresponding step of the RAG pipeline - or go into sub-routines within the steps. Eg, use subtypes(AbstractRetriever) to find the available options.

Arguments

Returns

See also build_index, retrieve, generate!, RAGResult, getpropertynested, setpropertynested, merge_kwargs_nested, ChunkKeywordsIndex.

Examples

Using airag to get a response for a question:

julia
index = build_index(...)  # create an index\nquestion = "How to make a barplot in Makie.jl?"\nmsg = airag(index; question)

To understand the details of the RAG process, use return_all=true

julia
msg, details = airag(index; question, return_all = true)\n# details is a RAGDetails object with all the internal steps of the `airag` function

You can also pretty-print details to highlight generated text vs text that is supported by context. It also includes annotations of which context was used for each part of the response (where available).

julia
PT.pprint(details)

Example with advanced retrieval (question rephrasing and reranking; requires COHERE_API_KEY). We will obtain the top 100 chunks from embeddings (top_k) and the top 5 chunks from reranking (top_n). In addition, it will be done with a "custom" locally-hosted model.

julia
cfg = RAGConfig(; retriever = AdvancedRetriever())\n\n# kwargs will be big and nested, let's prepare them upfront\n# we specify "custom" model for each component that calls LLM\nkwargs = (\n    retriever_kwargs = (;\n        top_k = 100,\n        top_n = 5,\n        rephraser_kwargs = (;\n            model = "custom"),\n        embedder_kwargs = (;\n            model = "custom"),\n        tagger_kwargs = (;\n            model = "custom")),\n    generator_kwargs = (;\n        answerer_kwargs = (;\n            model = "custom"),\n        refiner_kwargs = (;\n            model = "custom")),\n    api_kwargs = (;\n        url = "http://localhost:8080"))\n\nresult = airag(cfg, index, question; kwargs...)

If you want to use hybrid retrieval (embeddings + BM25), you can easily create an additional index based on keywords and pass them both into a MultiIndex.

You need to provide an explicit config, so the pipeline knows how to handle each index in the search similarity phase (finder).

julia
index = # your existing index\n\n# create the multi-index with the keywords index\nindex_keywords = ChunkKeywordsIndex(index)\nmulti_index = MultiIndex([index, index_keywords])\n\n# define the similarity measures for the indices that you have (same order)\nfinder = RT.MultiFinder([RT.CosineSimilarity(), RT.BM25Similarity()])\ncfg = RAGConfig(; retriever=AdvancedRetriever(; processor=RT.KeywordsProcessor(), finder))\n\n# Run the pipeline with the new hybrid retrieval (return the `RAGResult` to see the details)\nresult = airag(cfg, multi_index; question, return_all=true)\n\n# Pretty-print the result\nPT.pprint(result)

For easier manipulation of nested kwargs, see utilities getpropertynested, setpropertynested, merge_kwargs_nested.

source


# PromptingTools.Experimental.RAGTools.align_node_styles!Method.
julia
align_node_styles!(annotater::TrigramAnnotater, nodes::AbstractVector{<:AnnotatedNode}; kwargs...)

Aligns the styles of the nodes based on the surrounding nodes ("fill-in-the-middle").

If the node has no score, but the surrounding nodes have the same style, the node will inherit the style of the surrounding nodes.

source


# PromptingTools.Experimental.RAGTools.annotate_supportMethod.
julia
annotate_support(annotater::TrigramAnnotater, answer::AbstractString,\n    context::AbstractVector; min_score::Float64 = 0.5,\n    skip_trigrams::Bool = true, hashed::Bool = true,\n    sources::Union{Nothing, AbstractVector{<:AbstractString}} = nothing,\n    min_source_score::Float64 = 0.25,\n    add_sources::Bool = true,\n    add_scores::Bool = true, kwargs...)

Annotates the answer with the overlap of what's supported in the context and returns the annotated tree of nodes representing the answer.

Returns a "root" node with children nodes representing the sentences/code blocks in the answer. Only the "leaf" nodes are to be printed (to avoid duplication), "leaf" nodes are those with NO children.

Default logic:

Arguments

Example

julia
annotater = TrigramAnnotater()\ncontext = [\n    "This is a test context.", "Another context sentence.", "Final piece of context."]\nanswer = "This is a test context. Another context sentence."\n\nannotated_root = annotate_support(annotater, answer, context)\npprint(annotated_root) # pretty print the annotated tree

source


# PromptingTools.Experimental.RAGTools.annotate_supportMethod.
julia
annotate_support(\n    annotater::TrigramAnnotater, result::AbstractRAGResult; min_score::Float64 = 0.5,\n    skip_trigrams::Bool = true, hashed::Bool = true,\n    min_source_score::Float64 = 0.25,\n    add_sources::Bool = true,\n    add_scores::Bool = true, kwargs...)

Dispatch for annotate_support for AbstractRAGResult type. It extracts the final_answer and context from the result and calls annotate_support with them.

See annotate_support for more details.

Example

julia
res = RAGResult(; question = "", final_answer = "This is a test.",\n    context = ["Test context.", "Completely different"])\nannotated_root = annotate_support(annotater, res)\nPT.pprint(annotated_root)

source


# PromptingTools.Experimental.RAGTools.answer!Method.
julia
answer!(\n    answerer::SimpleAnswerer, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    model::AbstractString = PT.MODEL_CHAT, verbose::Bool = true,\n    template::Symbol = :RAGAnswerFromContext,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Generates an answer using the aigenerate function with the provided result.context and result.question.

Returns

Arguments

source


# PromptingTools.Experimental.RAGTools.build_contextMethod.
julia
build_context(contexter::ContextEnumerator,\n    index::AbstractDocumentIndex, candidates::AbstractCandidateChunks;\n    verbose::Bool = true,\n    chunks_window_margin::Tuple{Int, Int} = (1, 1), kwargs...)\n\n    build_context!(contexter::ContextEnumerator,\n    index::AbstractDocumentIndex, result::AbstractRAGResult; kwargs...)

Build context strings for each position in candidates considering a window margin around each position. If mutating version is used (build_context!), it will use result.reranked_candidates to update the result.context field.

Arguments

Returns

Examples

julia
index = ChunkIndex(...)  # Assuming a proper index is defined\ncandidates = CandidateChunks(index.id, [2, 4], [0.1, 0.2])\ncontext = build_context(ContextEnumerator(), index, candidates; chunks_window_margin=(0, 1)) # include only one following chunk for each matching chunk

source


# PromptingTools.Experimental.RAGTools.build_indexMethod.
julia
build_index(\n    indexer::KeywordsIndexer, files_or_docs::Vector{<:AbstractString};\n    verbose::Integer = 1,\n    extras::Union{Nothing, AbstractVector} = nothing,\n    index_id = gensym("ChunkKeywordsIndex"),\n    chunker::AbstractChunker = indexer.chunker,\n    chunker_kwargs::NamedTuple = NamedTuple(),\n    processor::AbstractProcessor = indexer.processor,\n    processor_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = indexer.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    api_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Builds a ChunkKeywordsIndex from the provided files or documents to support keyword-based search (BM25).

source


# PromptingTools.Experimental.RAGTools.build_indexMethod.
julia
build_index(\n    indexer::AbstractIndexBuilder, files_or_docs::Vector{<:AbstractString};\n    verbose::Integer = 1,\n    extras::Union{Nothing, AbstractVector} = nothing,\n    index_id = gensym("ChunkEmbeddingsIndex"),\n    chunker::AbstractChunker = indexer.chunker,\n    chunker_kwargs::NamedTuple = NamedTuple(),\n    embedder::AbstractEmbedder = indexer.embedder,\n    embedder_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = indexer.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    api_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Build an INDEX for RAG (Retrieval-Augmented Generation) applications from the provided file paths. INDEX is an object storing the document chunks and their embeddings (and potentially other information).

The function processes each file or document (depending on chunker), splits its content into chunks, embeds these chunks, optionally extracts metadata, and then combines this information into a retrievable index.

Define your own methods via indexer and its subcomponents (chunker, embedder, tagger).

Arguments

Returns

See also: ChunkEmbeddingsIndex, get_chunks, get_embeddings, get_tags, CandidateChunks, find_closest, find_tags, rerank, retrieve, generate!, airag

Examples

julia
# Default is loading a vector of strings and chunking them (`TextChunker()`)\nindex = build_index(SimpleIndexer(), texts; chunker_kwargs = (; max_length=10))\n\n# Another example with tags extraction, splitting only sentences and verbose output\n# Assuming `test_files` is a vector of file paths\nindexer = SimpleIndexer(chunker=FileChunker(), tagger=OpenTagger())\nindex = build_index(indexer, test_files; \n        chunker_kwargs(; separators=[". "]), verbose=true)

Notes

source


# PromptingTools.Experimental.RAGTools.build_qa_evalsMethod.
julia
build_qa_evals(doc_chunks::Vector{<:AbstractString}, sources::Vector{<:AbstractString};\n               model=PT.MODEL_CHAT, instructions="None.", qa_template::Symbol=:RAGCreateQAFromContext, \n               verbose::Bool=true, api_kwargs::NamedTuple = NamedTuple(), kwargs...) -> Vector{QAEvalItem}

Create a collection of question and answer evaluations (QAEvalItem) from document chunks and sources. This function generates Q&A pairs based on the provided document chunks, using a specified AI model and template.

Arguments

Returns

Vector{QAEvalItem}: A vector of QAEvalItem structs, each containing a source, context, question, and answer. Invalid or empty items are filtered out.

Notes

Examples

Creating Q&A evaluations from a set of document chunks:

julia
doc_chunks = ["Text from document 1", "Text from document 2"]\nsources = ["source1", "source2"]\nqa_evals = build_qa_evals(doc_chunks, sources)

source


# PromptingTools.Experimental.RAGTools.build_tagsFunction.

Builds a matrix of tags and a vocabulary list. REQUIRES the SparseArrays, LinearAlgebra, and Unicode packages to be loaded!

source


# PromptingTools.Experimental.RAGTools.build_tagsMethod.
julia
build_tags(tagger::AbstractTagger, chunk_tags::Nothing; kwargs...)

No-op that skips any tag building, returning nothing, nothing

Otherwise, it would build the sparse matrix and the vocabulary (requires SparseArrays and LinearAlgebra packages to be loaded).

source


# PromptingTools.Experimental.RAGTools.chunkdataMethod.

Access chunkdata for a subset of chunks, chunk_idx is a vector of chunk indices in the index

source


# PromptingTools.Experimental.RAGTools.chunkdataMethod.

Access chunkdata for a subset of chunks, chunk_idx is a vector of chunk indices in the index

source


# PromptingTools.Experimental.RAGTools.chunkdataMethod.

Access chunkdata for a subset of chunks, chunk_idx is a vector of chunk indices in the index

source


# PromptingTools.Experimental.RAGTools.cohere_apiMethod.
julia
cohere_api(;\napi_key::AbstractString,\nendpoint::String,\nurl::AbstractString="https://api.cohere.ai/v1",\nhttp_kwargs::NamedTuple=NamedTuple(),\nkwargs...)

Lightweight wrapper around the Cohere API. See https://cohere.com/docs for more details.

Arguments

source


# PromptingTools.Experimental.RAGTools.create_permutation_instructionMethod.
julia
create_permutation_instruction(\n    context::AbstractVector{<:AbstractString}; rank_start::Integer = 1,\n    rank_end::Integer = 100, max_length::Integer = 512, template::Symbol = :RAGRankGPT)

Creates rendered template with injected context passages.

source


# PromptingTools.Experimental.RAGTools.extract_rankingMethod.
julia
extract_ranking(str::AbstractString)

Extracts the ranking from the response into a sorted array of integers.

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::AbstractSimilarityFinder, index::AbstractChunkIndex,\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, kwargs...)

Finds the indices of chunks (represented by embeddings in index) that are closest to query embedding (query_emb).

Returns only top_k closest indices.

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::BitPackedCosineSimilarity, emb::AbstractMatrix{<:Bool},\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, rescore_multiplier::Int = 4, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by embeddings in emb) that are closest to query embedding (query_emb) using bit-packed binary embeddings (in the index).

This is a two-pass approach:

Returns only top_k closest indices.

Reference: HuggingFace: Embedding Quantization.

Examples

Convert any Float embeddings to bit-packed binary like this:

julia
bitpacked_emb = pack_bits(emb.>0)

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::BM25Similarity, dtm::AbstractDocumentTermMatrix,\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by DocumentTermMatrix in dtm) that are closest to query tokens (query_tokens) using BM25.

Reference: Wikipedia: BM25. Implementation follows: The Next Generation of Lucene Relevance.

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::CosineSimilarity, emb::AbstractMatrix{<:Real},\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by embeddings in emb) that are closest (in cosine similarity for CosineSimilarity()) to query embedding (query_emb).

finder is the logic used for the similarity search. Default is CosineSimilarity.

If minimum_similarity is provided, only indices with similarity greater than or equal to it are returned. Similarity can be between -1 and 1 (-1 = completely opposite, 1 = exactly the same).

Returns only top_k closest indices.
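
Example

A hedged sketch; emb is the embedding matrix stored in the index and query_emb is the embedding of the question:

julia
closest = find_closest(CosineSimilarity(), emb, query_emb; top_k = 5, minimum_similarity = 0.25)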

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::BinaryCosineSimilarity, emb::AbstractMatrix{<:Bool},\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, rescore_multiplier::Int = 4, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by embeddings in emb) that are closest to query embedding (query_emb) using binary embeddings (in the index).

This is a two-pass approach:

Returns only top_k closest indices.

Reference: HuggingFace: Embedding Quantization.

Examples

Convert any Float embeddings to binary like this:

julia
binary_emb = map(>(0), emb)

source


# PromptingTools.Experimental.RAGTools.find_tagsMethod.
julia
find_tags(method::AnyTagFilter, index::AbstractChunkIndex,\n    tag::Union{AbstractString, Regex}; kwargs...)\n\nfind_tags(method::AnyTagFilter, index::AbstractChunkIndex,\n    tags::Vector{T}; kwargs...) where {T <: Union{AbstractString, Regex}}

Finds the indices of chunks (represented by tags in index) that have ANY OF the specified tag or tags.
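
Example

A hedged sketch; returns the candidate chunks whose tags match either entry:

julia
candidates = find_tags(AnyTagFilter(), index, ["julia", "python"])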

source


# PromptingTools.Experimental.RAGTools.find_tagsMethod.
julia
find_tags(method::AllTagFilter, index::AbstractChunkIndex,\n    tag::Union{AbstractString, Regex}; kwargs...)\n\nfind_tags(method::AllTagFilter, index::AbstractChunkIndex,\n    tags::Vector{T}; kwargs...) where {T <: Union{AbstractString, Regex}}

Finds the indices of chunks (represented by tags in index) that have ALL OF the specified tag or tags.

source


# PromptingTools.Experimental.RAGTools.find_tagsMethod.
julia
find_tags(method::NoTagFilter, index::AbstractChunkIndex,\n    tags::Union{T, AbstractVector{<:T}}; kwargs...) where {T <:\n                                                           Union{\n    AbstractString, Regex, Nothing}}\n    tags; kwargs...)

Returns all chunks in the index, ie, no filtering, so we simply return nothing (easier for dispatch).

source


# PromptingTools.Experimental.RAGTools.generate!Method.
julia
generate!(\n    generator::AbstractGenerator, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    verbose::Integer = 1,\n    api_kwargs::NamedTuple = NamedTuple(),\n    contexter::AbstractContextBuilder = generator.contexter,\n    contexter_kwargs::NamedTuple = NamedTuple(),\n    answerer::AbstractAnswerer = generator.answerer,\n    answerer_kwargs::NamedTuple = NamedTuple(),\n    refiner::AbstractRefiner = generator.refiner,\n    refiner_kwargs::NamedTuple = NamedTuple(),\n    postprocessor::AbstractPostprocessor = generator.postprocessor,\n    postprocessor_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Generate the response using the provided generator and the index and result. It is the second step in the RAG pipeline (after retrieve)

Returns the mutated result with the result.final_answer and the full conversation saved in result.conversations[:final_answer].

Notes

Arguments

See also: retrieve, build_context!, ContextEnumerator, answer!, SimpleAnswerer, refine!, NoRefiner, SimpleRefiner, postprocess!, NoPostprocessor

Examples

julia
# Assume we already have `index`\n\nquestion = "What are the best practices for parallel computing in Julia?"\n\n# Retrieve the relevant chunks - returns RAGResult\nresult = retrieve(index, question)\n\n# Generate the answer using the default generator, mutates the same result\nresult = generate!(index, result)

source


# PromptingTools.Experimental.RAGTools.get_chunksMethod.
julia
get_chunks(chunker::AbstractChunker,\n    files_or_docs::Vector{<:AbstractString};\n    sources::AbstractVector{<:AbstractString} = files_or_docs,\n    verbose::Bool = true,\n    separators = ["\\n\\n", ". ", "\\n", " "], max_length::Int = 256)

Chunks the provided files_or_docs into chunks of maximum length max_length (if possible with provided separators).

Supports two modes of operation:

Arguments
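
Examples

A hedged sketch of the text mode (TextChunker); the file mode (FileChunker) takes file paths instead:

julia
texts = ["First document text.", "Second document text."]
chunks, sources = get_chunks(TextChunker(), texts; sources = ["doc1", "doc2"], max_length = 128)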

source


# PromptingTools.Experimental.RAGTools.get_embeddingsMethod.
julia
get_embeddings(embedder::BatchEmbedder, docs::AbstractVector{<:AbstractString};\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_EMBEDDING,\n    truncate_dimension::Union{Int, Nothing} = nothing,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    target_batch_size_length::Int = 80_000,\n    ntasks::Int = 4 * Threads.nthreads(),\n    kwargs...)

Embeds a vector of docs using the provided model (kwarg model) in a batched manner - BatchEmbedder.

BatchEmbedder tries to batch embedding calls for roughly 80K characters per call (to avoid exceeding the API rate limit) to reduce network latency.

Notes

Arguments

source


# PromptingTools.Experimental.RAGTools.get_embeddingsMethod.
julia
get_embeddings(embedder::BinaryBatchEmbedder, docs::AbstractVector{<:AbstractString};\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_EMBEDDING,\n    truncate_dimension::Union{Int, Nothing} = nothing,\n    return_type::Type = Matrix{Bool},\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    target_batch_size_length::Int = 80_000,\n    ntasks::Int = 4 * Threads.nthreads(),\n    kwargs...)

Embeds a vector of docs using the provided model (kwarg model) in a batched manner and then returns the binary embeddings matrix - BinaryBatchEmbedder.

BinaryBatchEmbedder tries to batch embedding calls for roughly 80K characters per call (to avoid exceeding the API rate limit) to reduce network latency.

Notes

Arguments

source


# PromptingTools.Experimental.RAGTools.get_embeddingsMethod.
julia
get_embeddings(embedder::BitPackedBatchEmbedder, docs::AbstractVector{<:AbstractString};\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_EMBEDDING,\n    truncate_dimension::Union{Int, Nothing} = nothing,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    target_batch_size_length::Int = 80_000,\n    ntasks::Int = 4 * Threads.nthreads(),\n    kwargs...)

Embeds a vector of docs using the provided model (kwarg model) in a batched manner and then returns the binary embeddings matrix represented in UInt64 (bit-packed) - BitPackedBatchEmbedder.

BitPackedBatchEmbedder tries to batch embedding calls for roughly 80K characters per call (to avoid exceeding the API rate limit) to reduce network latency.

The best option for FAST and MEMORY-EFFICIENT storage of embeddings, for retrieval use BitPackedCosineSimilarity.

Notes

Arguments

See also: unpack_bits, pack_bits, BitPackedCosineSimilarity.

source


# PromptingTools.Experimental.RAGTools.get_tagsMethod.
julia
get_tags(tagger::NoTagger, docs::AbstractVector{<:AbstractString};\n    kwargs...)

Simple no-op that skips any tagging of the documents

source


# PromptingTools.Experimental.RAGTools.get_tagsMethod.
julia
get_tags(tagger::OpenTagger, docs::AbstractVector{<:AbstractString};\n    verbose::Bool = true,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Extracts "tags" (metadata/keywords) from a vector of docs using the provided model (kwarg model).

Arguments

source


# PromptingTools.Experimental.RAGTools.get_tagsMethod.
julia
get_tags(tagger::PassthroughTagger, docs::AbstractVector{<:AbstractString};\n    tags::AbstractVector{<:AbstractVector{<:AbstractString}},\n    kwargs...)

Pass tags directly as Vector of Vectors of strings (ie, tags[i] is the tags for docs[i]). It then builds the vocabulary from the tags and returns both the tags in matrix form and the vocabulary.

source


# PromptingTools.Experimental.RAGTools.getpropertynestedFunction.
julia
getpropertynested(\n    nt::NamedTuple, parent_keys::Vector{Symbol}, key::Symbol, default = nothing)

Get a property key from a nested NamedTuple nt, where the property is nested to a key in parent_keys.

Useful for nested kwargs where we want to get some property in parent_keys subset (eg, model in retriever_kwargs).

Examples

julia
kw = (; abc = (; def = "x"))\ngetpropertynested(kw, [:abc], :def)\n# Output: "x"

source


# PromptingTools.Experimental.RAGTools.hamming_distanceMethod.
julia
hamming_distance(\n    mat::AbstractMatrix{T}, query::AbstractVector{T})::Vector{Int} where {T <: Integer}

Calculates the column-wise Hamming distance between a matrix of binary vectors mat and a single binary vector query.

This is the first-pass ranking for BinaryCosineSimilarity method.

Implementation from domluna's tinyRAG.
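
Example

A hedged sketch with bit-packed embeddings (UInt64 columns); Bool matrices work the same way:

julia
mat = rand(UInt64, 16, 100)           # 100 chunks, 16 UInt64 words each (1024 bits)
query = rand(UInt64, 16)
dists = hamming_distance(mat, query)  # Vector{Int} of length 100, lower = closer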

source


# PromptingTools.Experimental.RAGTools.hcat_truncateMethod.
julia
hcat_truncate(matrices::AbstractVector{<:AbstractMatrix{T}},\n    truncate_dimension::Union{Nothing, Int} = nothing; verbose::Bool = false) where {T <:\n                                                                                     Real}

Horizontal concatenation of matrices, with optional truncation of the rows of each matrix to the specified dimension (reducing embedding dimensionality).

More efficient than simple splatting, as the resulting matrix is pre-allocated in one go.

Returns: a Matrix{Float32}

Arguments

Examples

julia
a = rand(Float32, 1000, 10)\nb = rand(Float32, 1000, 20)\n\nc = hcat_truncate([a, b])\nsize(c) # (1000, 30)\n\nd = hcat_truncate([a, b], 500)\nsize(d) # (500, 30)

source


# PromptingTools.Experimental.RAGTools.load_textMethod.
julia
load_text(chunker::AbstractChunker, input;\n    kwargs...)

Load text from input using the provided chunker. Called by get_chunks.

Available chunkers:

source


# PromptingTools.Experimental.RAGTools.merge_kwargs_nestedMethod.
julia
merge_kwargs_nested(nt1::NamedTuple, nt2::NamedTuple)

Merges two nested NamedTuples nt1 and nt2 recursively. The nt2 values will overwrite the nt1 values when overlapping.

Example

julia
kw = (; abc = (; def = "x"))\nkw2 = (; abc = (; def = "x", def2 = 2), new = 1)\nmerge_kwargs_nested(kw, kw2)

source


# PromptingTools.Experimental.RAGTools.pack_bitsMethod.
julia
pack_bits(arr::AbstractMatrix{<:Bool}) -> Matrix{UInt64}\npack_bits(vect::AbstractVector{<:Bool}) -> Vector{UInt64}

Pack a matrix or vector of boolean values into a more compact representation using UInt64.

Arguments (Input)

Returns

Examples

For vectors:

julia
bin = rand(Bool, 128)\nbinint = pack_bits(bin)\nbinx = unpack_bits(binint)\n@assert bin == binx

For matrices:

julia
bin = rand(Bool, 128, 10)\nbinint = pack_bits(bin)\nbinx = unpack_bits(binint)\n@assert bin == binx

source


# PromptingTools.Experimental.RAGTools.permutation_step!Method.
julia
permutation_step!(\n    result::RankGPTResult; rank_start::Integer = 1, rank_end::Integer = 100, kwargs...)

One sub-step of the RankGPT algorithm permutation ranking within the window of chunks defined by rank_start and rank_end positions.

source


# PromptingTools.Experimental.RAGTools.preprocess_tokensFunction.
julia
preprocess_tokens(text::AbstractString, stemmer=nothing; stopwords::Union{Nothing,Set{String}}=nothing, min_length::Int=3)

Preprocess provided text by removing numbers, punctuation, and applying stemming for BM25 search index.

Returns a list of preprocessed tokens.

Example

julia
stemmer = Snowball.Stemmer("english")\nstopwords = Set(["a", "an", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "no", "not", "of", "on", "or", "such", "some", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"])\ntext = "This is a sample paragraph to test the functionality of your text preprocessor. It contains a mix of uppercase and lowercase letters, as well as punctuation marks such as commas, periods, and exclamation points! Let's see how your preprocessor handles quotes, like "this one", and also apostrophes, like in don't. Will it preserve the formatting of this paragraph, including the indentation and line breaks?"\npreprocess_tokens(text, stemmer; stopwords)

source


# PromptingTools.Experimental.RAGTools.print_htmlMethod.
julia
print_html([io::IO,] parent_node::AbstractAnnotatedNode)\n\nprint_html([io::IO,] rag::AbstractRAGResult; add_sources::Bool = false,\n    add_scores::Bool = false, default_styler = HTMLStyler(),\n    low_styler = HTMLStyler(styles = "color:magenta", classes = ""),\n    medium_styler = HTMLStyler(styles = "color:blue", classes = ""),\n    high_styler = HTMLStyler(styles = "", classes = ""), styler_kwargs...)

Pretty-prints the annotation parent_node (or RAGResult) to the io stream (or returns the string) in HTML format (assumes node is styled with styler HTMLStyler).

It wraps each "token" into a span with requested styling (HTMLStyler's properties classes and styles). It also replaces new lines with <br> for better HTML formatting.

For any non-HTML styler, it prints the content as plain text.

Returns

See also HTMLStyler, annotate_support, and set_node_style! for how the styling is applied and what the arguments mean.

Examples

Note: RT is an alias for PromptingTools.Experimental.RAGTools

Simple start directly with the RAGResult:

julia
# set up the text/RAGResult\ncontext = [\n    "This is a test context.", "Another context sentence.", "Final piece of context."]\nanswer = "This is a test answer. It has multiple sentences."\nrag = RT.RAGResult(; context, final_answer=answer, question="")\n\n# print the HTML\nprint_html(rag)

Low-level control by creating our AnnotatedNode:

julia
# prepare your HTML styling\nstyler_kwargs = (;\n    default_styler=RT.HTMLStyler(),\n    low_styler=RT.HTMLStyler(styles="color:magenta", classes=""),\n    medium_styler=RT.HTMLStyler(styles="color:blue", classes=""),\n    high_styler=RT.HTMLStyler(styles="", classes=""))\n\n# annotate the text\ncontext = [\n    "This is a test context.", "Another context sentence.", "Final piece of context."]\nanswer = "This is a test answer. It has multiple sentences."\n\nparent_node = RT.annotate_support(\n    RT.TrigramAnnotater(), answer, context; add_sources=false, add_scores=false, styler_kwargs...)\n\n# print the HTML\nprint_html(parent_node)\n\n# or to accumulate more nodes\nio = IOBuffer()\nprint_html(io, parent_node)

source


# PromptingTools.Experimental.RAGTools.rank_gptMethod.
julia
rank_gpt(chunks::AbstractVector{<:AbstractString}, question::AbstractString;\n    verbose::Int = 1, rank_start::Integer = 1, rank_end::Integer = 100,\n    window_size::Integer = 20, step::Integer = 10,\n    num_rounds::Integer = 1, model::String = "gpt4o", kwargs...)

Ranks the chunks based on their relevance for question. Returns the ranking permutation of the chunks in the order they are most relevant to the question (the first is the most relevant).

Example

julia
result = rank_gpt(chunks, question; rank_start=1, rank_end=25, window_size=8, step=4, num_rounds=3, model="gpt4o")

Reference

[1] Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agents by W. Sun et al. [2] RankGPT Github

source


# PromptingTools.Experimental.RAGTools.rank_sliding_window!Method.
julia
rank_sliding_window!(\n    result::RankGPTResult; verbose::Int = 1, rank_start = 1, rank_end = 100,\n    window_size = 20, step = 10, model::String = "gpt4o", kwargs...)

One single pass of the RankGPT algorithm permutation ranking across all positions between rank_start and rank_end.

source


# PromptingTools.Experimental.RAGTools.receive_permutation!Method.
julia
receive_permutation!(\n    curr_rank::AbstractVector{<:Integer}, response::AbstractString;\n    rank_start::Integer = 1, rank_end::Integer = 100)

Extracts and heals the permutation to contain all ranking positions.

source


# PromptingTools.Experimental.RAGTools.reciprocal_rank_fusionMethod.
julia
reciprocal_rank_fusion(args...; k::Int=60)

Merges multiple rankings and calculates the reciprocal rank score for each chunk (discounted by the inverse of the rank).

Example

julia
positions1 = [1, 3, 5, 7, 9]\npositions2 = [2, 4, 6, 8, 10]\npositions3 = [2, 4, 6, 11, 12]\n\nmerged_positions, scores = reciprocal_rank_fusion(positions1, positions2, positions3)

source


# PromptingTools.Experimental.RAGTools.reciprocal_rank_fusionMethod.
julia
reciprocal_rank_fusion(\n    positions1::AbstractVector{<:Integer}, scores1::AbstractVector{<:T},\n    positions2::AbstractVector{<:Integer},\n    scores2::AbstractVector{<:T}; k::Int = 60) where {T <: Real}

Merges two sets of rankings and their joint scores. Calculates the reciprocal rank score for each chunk (discounted by the inverse of the rank).

Example

julia
positions1 = [1, 3, 5, 7, 9]\nscores1 = [0.9, 0.8, 0.7, 0.6, 0.5]\npositions2 = [2, 4, 6, 8, 10]\nscores2 = [0.5, 0.6, 0.7, 0.8, 0.9]\n\nmerged, scores = reciprocal_rank_fusion(positions1, scores1, positions2, scores2; k = 60)

source


# PromptingTools.Experimental.RAGTools.refine!Method.
julia
refine!(\n    refiner::NoRefiner, index::AbstractChunkIndex, result::AbstractRAGResult;\n    kwargs...)

Simple no-op function for refine!. It simply copies the result.answer and result.conversations[:answer] without any changes.

source


# PromptingTools.Experimental.RAGTools.refine!Method.
julia
refine!(\n    refiner::SimpleRefiner, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_CHAT,\n    template::Symbol = :RAGAnswerRefiner,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Give model a chance to refine the answer (using the same or different context than previously provided).

This method uses the same context as the original answer, however, it can be modified to do additional retrieval and use a different context.

Returns

Arguments
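
Example

A minimal usage sketch (assumes you already have an index and a result produced by retrieve; default template and model as listed above):

julia
result = refine!(SimpleRefiner(), index, result)\n# the refined text is stored in result.final_answer (and result.conversations[:final_answer])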

source


# PromptingTools.Experimental.RAGTools.refine!Method.
julia
refine!(\n    refiner::TavilySearchRefiner, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_CHAT,\n    include_answer::Bool = true,\n    max_results::Integer = 5,\n    include_domains::AbstractVector{<:AbstractString} = String[],\n    exclude_domains::AbstractVector{<:AbstractString} = String[],\n    template::Symbol = :RAGWebSearchRefiner,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Refines the answer by executing a web search using the Tavily API. This method aims to enhance the answer's accuracy and relevance by incorporating information retrieved from the web.

Note: The web results and web answer (if requested) will be added to the context and sources!

Returns

Arguments

Example

julia
refiner!(TavilySearchRefiner(), index, result)\n# See result.final_answer or pprint(result)

To enable this refiner in a full RAG pipeline, simply swap the component in the config:

julia
cfg = RT.RAGConfig()\ncfg.generator.refiner = RT.TavilySearchRefiner()\n\nresult = airag(cfg, index; question, return_all = true)\npprint(result)

source


# PromptingTools.Experimental.RAGTools.rephraseMethod.
julia
rephrase(rephraser::SimpleRephraser, question::AbstractString;\n    verbose::Bool = true,\n    model::String = PT.MODEL_CHAT, template::Symbol = :RAGQueryHyDE,\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Rephrases the question using the provided rephraser template (default: RAGQueryHyDE).

Special flavor of rephrasing using HyDE (Hypothetical Document Embedding) method, which aims to find the documents most similar to a synthetic passage that would be a good answer to our question.

Returns both the original and the rephrased question.

Arguments
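
Example

A minimal sketch following the signature above (the template kwarg is passed explicitly here for clarity; it defaults to :RAGQueryHyDE):

julia
questions = rephrase(SimpleRephraser(), "What is the capital of France?"; template = :RAGQueryHyDE)\n# returns a vector with the original question and the rephrased (hypothetical passage) question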

source


# PromptingTools.Experimental.RAGTools.rephraseMethod.
julia
rephrase(rephraser::NoRephraser, question::AbstractString; kwargs...)

No-op, simple passthrough.

source


# PromptingTools.Experimental.RAGTools.rephraseMethod.
julia
rephrase(rephraser::SimpleRephraser, question::AbstractString;\n    verbose::Bool = true,\n    model::String = PT.MODEL_CHAT, template::Symbol = :RAGQueryOptimizer,\n    cost_tracker = Threads.Atomic{Float64}(0.0), kwargs...)

Rephrases the question using the provided rephraser template.

Returns both the original and the rephrased question.

Arguments

source


# PromptingTools.Experimental.RAGTools.rerankMethod.
julia
rerank(\n    reranker::CohereReranker, index::AbstractDocumentIndex, question::AbstractString,\n    candidates::AbstractCandidateChunks;\n    verbose::Bool = false,\n    api_key::AbstractString = PT.COHERE_API_KEY,\n    top_n::Integer = length(candidates.scores),\n    model::AbstractString = "rerank-english-v3.0",\n    return_documents::Bool = false,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Re-ranks a list of candidate chunks using the Cohere Rerank API. See https://cohere.com/rerank for more details.

Arguments
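
Example

A hedged sketch (assumes candidates were produced by an earlier retrieval/find_closest step and that COHERE_API_KEY is set):

julia
reranked = rerank(CohereReranker(), index, question, candidates; top_n = 5)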

source


# PromptingTools.Experimental.RAGTools.rerankMethod.
julia
rerank(\n    reranker::RankGPTReranker, index::AbstractDocumentIndex, question::AbstractString,\n    candidates::AbstractCandidateChunks;\n    api_key::AbstractString = PT.OPENAI_API_KEY,\n    model::AbstractString = PT.MODEL_CHAT,\n    verbose::Bool = false,\n    top_n::Integer = length(candidates.scores),\n    unique_chunks::Bool = true,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Re-ranks a list of candidate chunks using the RankGPT algorithm. See https://github.com/sunnweiwei/RankGPT for more details.

It uses LLM calls to rank the candidate chunks.

Arguments

Examples

julia
index = <some index>\nquestion = "What are the best practices for parallel computing in Julia?"\n\ncfg = RAGConfig(; retriever = SimpleRetriever(; reranker = RT.RankGPTReranker()))\nmsg = airag(cfg, index; question, return_all = true)

To get full verbosity of logs, set verbose = 5 (anything higher than 3).

julia
msg = airag(cfg, index; question, return_all = true, verbose = 5)

Reference

[1] Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agents by W. Sun et al. [2] RankGPT Github

source


# PromptingTools.Experimental.RAGTools.retrieveMethod.
julia
retrieve(retriever::AbstractRetriever,\n    index::AbstractChunkIndex,\n    question::AbstractString;\n    verbose::Integer = 1,\n    top_k::Integer = 100,\n    top_n::Integer = 5,\n    api_kwargs::NamedTuple = NamedTuple(),\n    rephraser::AbstractRephraser = retriever.rephraser,\n    rephraser_kwargs::NamedTuple = NamedTuple(),\n    embedder::AbstractEmbedder = retriever.embedder,\n    embedder_kwargs::NamedTuple = NamedTuple(),\n    processor::AbstractProcessor = retriever.processor,\n    processor_kwargs::NamedTuple = NamedTuple(),\n    finder::AbstractSimilarityFinder = retriever.finder,\n    finder_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = retriever.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    filter::AbstractTagFilter = retriever.filter,\n    filter_kwargs::NamedTuple = NamedTuple(),\n    reranker::AbstractReranker = retriever.reranker,\n    reranker_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Retrieves the most relevant chunks from the index for the given question and returns them in the RAGResult object.

This is the main entry point for the retrieval stage of the RAG pipeline. It is often followed by the generate! step.

Notes:

The arguments correspond to the steps of the retrieval process (rephrasing, embedding, finding similar docs, tagging, filtering by tags, reranking). You can customize each step by providing a new custom type that dispatches the corresponding function, eg, create your own type struct MyReranker<:AbstractReranker end and define the custom method for it rerank(::MyReranker,...) = ....

Note: Discover available retrieval sub-types for each step with subtypes(AbstractRephraser) and similar for other abstract types.

If you're using locally-hosted models, you can pass the api_kwargs with the url field set to the model's URL and make sure to provide corresponding model kwargs to rephraser, embedder, and tagger to use the custom models (they make AI calls).

Arguments

See also: SimpleRetriever, AdvancedRetriever, build_index, rephrase, get_embeddings, get_keywords, find_closest, get_tags, find_tags, rerank, RAGResult.

Examples

Find the 5 most relevant chunks from the index for the given question.

julia
# assumes you have an existing index `index`\nretriever = SimpleRetriever()\n\nresult = retrieve(retriever,\n    index,\n    "What is the capital of France?",\n    top_n = 5)\n\n# or use the default retriever (same as above)\nresult = retrieve(index,\n    "What is the capital of France?",\n    top_n = 5)

Apply more advanced retrieval with question rephrasing and reranking (requires COHERE_API_KEY). We will obtain top 100 chunks from embeddings (top_k) and top 5 chunks from reranking (top_n).

julia
retriever = AdvancedRetriever()\n\nresult = retrieve(retriever, index, question; top_k=100, top_n=5)

You can use the retriever to customize your retrieval strategy or directly change the strategy types in the retrieve kwargs!

Example of using a locally-hosted model served at localhost:8080:

julia
retriever = SimpleRetriever()\nresult = retrieve(retriever, index, question;\n    rephraser_kwargs = (; model = "custom"),\n    embedder_kwargs = (; model = "custom"),\n    tagger_kwargs = (; model = "custom"), api_kwargs = (;\n        url = "http://localhost:8080"))

source


# PromptingTools.Experimental.RAGTools.run_qa_evalsMethod.
julia
run_qa_evals(index::AbstractChunkIndex, qa_items::AbstractVector{<:QAEvalItem};\n    api_kwargs::NamedTuple = NamedTuple(),\n    airag_kwargs::NamedTuple = NamedTuple(),\n    qa_evals_kwargs::NamedTuple = NamedTuple(),\n    verbose::Bool = true, parameters_dict::Dict{Symbol, <:Any} = Dict{Symbol, Any}())

Evaluates a vector of QAEvalItems and returns a vector QAEvalResult. This function assesses the relevance and accuracy of the answers generated in a QA evaluation context.

See ?run_qa_evals for more details.

Arguments

Returns

Vector{QAEvalResult}: Vector of evaluation results that includes various scores and metadata related to the QA evaluation.

Example

julia
index = "..." # Assuming a proper index is defined\nqa_items = [QAEvalItem(question="What is the capital of France?", answer="Paris", context="France is a country in Europe."),\n            QAEvalItem(question="What is the capital of Germany?", answer="Berlin", context="Germany is a country in Europe.")]\n\n# Let's run a test with `top_k=5`\nresults = run_qa_evals(index, qa_items; airag_kwargs=(;top_k=5), parameters_dict=Dict(:top_k => 5))\n\n# Filter out the "failed" calls\nresults = filter(x->!isnothing(x.answer_score), results);\n\n# See average judge score\nmean(x->x.answer_score, results)

source


# PromptingTools.Experimental.RAGTools.run_qa_evalsMethod.
julia
run_qa_evals(qa_item::QAEvalItem, ctx::RAGResult; verbose::Bool = true,\n             parameters_dict::Dict{Symbol, <:Any}, judge_template::Symbol = :RAGJudgeAnswerFromContext,\n             model_judge::AbstractString, api_kwargs::NamedTuple = NamedTuple()) -> QAEvalResult

Evaluates a single QAEvalItem using RAG details (RAGResult) and returns a QAEvalResult structure. This function assesses the relevance and accuracy of the answers generated in a QA evaluation context.

Arguments

Returns

QAEvalResult: An evaluation result that includes various scores and metadata related to the QA evaluation.

Notes

Examples

Evaluating a QA pair using a specific context and model:

julia
qa_item = QAEvalItem(question="What is the capital of France?", answer="Paris", context="France is a country in Europe.")\nctx = RAGResult(source="Wikipedia", context="France is a country in Europe.", answer="Paris")\nparameters_dict = Dict("param1" => "value1", "param2" => "value2")\n\neval_result = run_qa_evals(qa_item, ctx, parameters_dict=parameters_dict, model_judge="MyAIJudgeModel")

source


# PromptingTools.Experimental.RAGTools.score_retrieval_hitMethod.

Returns 1.0 if context overlaps or is contained within any of the candidate_context

source


# PromptingTools.Experimental.RAGTools.score_retrieval_rankMethod.

Returns Integer rank of the position where context overlaps or is contained within a candidate_context

source


# PromptingTools.Experimental.RAGTools.score_to_unit_scaleMethod.
julia
score_to_unit_scale(x::AbstractVector{T}) where T<:Real

Shift and scale a vector of scores to the unit scale [0, 1].

Example

julia
x = [1.0, 2.0, 3.0, 4.0, 5.0]\nscaled_x = score_to_unit_scale(x)

source


# PromptingTools.Experimental.RAGTools.set_node_style!Method.
julia
set_node_style!(::TrigramAnnotater, node::AnnotatedNode;\n    low_threshold::Float64 = 0.0, medium_threshold::Float64 = 0.5, high_threshold::Float64 = 1.0,\n    default_styler::AbstractAnnotationStyler = Styler(),\n    low_styler::AbstractAnnotationStyler = Styler(color = :magenta, bold = false),\n    medium_styler::AbstractAnnotationStyler = Styler(color = :blue, bold = false),\n    high_styler::AbstractAnnotationStyler = Styler(color = :nothing, bold = false),\n    bold_multihits::Bool = false)

Sets the style of node based on the provided rules.

source


# PromptingTools.Experimental.RAGTools.setpropertynestedMethod.
julia
setpropertynested(nt::NamedTuple, parent_keys::Vector{Symbol},\n    key::Symbol,\n    value

)

Setter for a property key in a nested NamedTuple nt, where the property is nested to a key in parent_keys.

Useful for nested kwargs where we want to change some property in parent_keys subset (eg, model in retriever_kwargs).

Examples

julia
kw = (; abc = (; def = "x"))\nsetpropertynested(kw, [:abc], :def, "y")\n# Output: (abc = (def = "y",),)

Practical example of changing all model keys in CHAT-based steps in the pipeline:

julia
# changes :model to "gpt4t" whenever the parent key is in the below list (chat-based steps)\nsetpropertynested(kwargs,\n    [:rephraser_kwargs, :tagger_kwargs, :answerer_kwargs, :refiner_kwargs],\n    :model, "gpt4t")

Or changing an embedding model (across both indexer and retriever steps, because it's same step name):

julia
kwargs = setpropertynested(\n        kwargs, [:embedder_kwargs],\n        :model, "text-embedding-3-large"\n    )

source


# PromptingTools.Experimental.RAGTools.split_into_code_and_sentencesMethod.
julia
split_into_code_and_sentences(input::Union{String, SubString{String}})

Splits text block into code or text and sub-splits into units.

If it is a code block, it splits by newline but keeps the group_id the same (to have the same source). If it is a text block, it splits into sentences, bullets, etc., and provides a different group_id for each (to have different sources).

source


# PromptingTools.Experimental.RAGTools.tags_extractMethod.
julia
tags_extract(item::Tag)\ntags_extract(tags::Vector{Tag})

Extracts the Tag item into a string of the form category:::value (lowercased and spaces replaced with underscores).

Example

julia
msg = aiextract(:RAGExtractMetadataShort; return_type=MaybeTags, text="I like package DataFrames", instructions="None.")\nmetadata = tags_extract(msg.content.items)

source


# PromptingTools.Experimental.RAGTools.token_with_boundariesMethod.
julia
token_with_boundaries(\n    prev_token::Union{Nothing, AbstractString}, curr_token::AbstractString,\n    next_token::Union{Nothing, AbstractString})

Joins the three tokens together. Useful to add boundary tokens (like spaces or brackets) to the curr_token to improve the matched context (ie, to separate partial matches from exact matches).

source


# PromptingTools.Experimental.RAGTools.tokenizeMethod.
julia
tokenize(input::Union{String, SubString{String}})

Tokenizes provided input by spaces, special characters or Julia symbols (eg, =>).

Unlike other tokenizers, it aims to be lossless, ie, it keeps both the separated text and the separators.
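
Example

An illustrative sketch only (the exact splitting of separators depends on the implementation):

julia
tokens = RT.tokenize("x => 1")\n# keeps the separators, eg, something like ["x", " ", "=>", " ", "1"]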

source


# PromptingTools.Experimental.RAGTools.translate_positions_to_parentMethod.
julia
translate_positions_to_parent(index::AbstractChunkIndex, positions::AbstractVector{<:Integer})

Translate positions to the parent index. Useful to convert between positions in a view and the original index.

Used whenever chunkdata() is used, to re-align positions in case index is a view.

source


# PromptingTools.Experimental.RAGTools.translate_positions_to_parentMethod.
julia
translate_positions_to_parent(\n    index::SubChunkIndex, pos::AbstractVector{<:Integer})

Translate positions to the parent index. Useful to convert between positions in a view and the original index.

Used whenever a chunkdata() or tags() are used to re-align positions to the "parent" index.

source


# PromptingTools.Experimental.RAGTools.trigram_support!Method.
julia
trigram_support!(parent_node::AnnotatedNode,\n    context_trigrams::AbstractVector, trigram_func::F1 = trigrams, token_transform::F2 = identity;\n    skip_trigrams::Bool = false, min_score::Float64 = 0.5,\n    min_source_score::Float64 = 0.25,\n    stop_words::AbstractVector{<:String} = STOPWORDS,\n    styler_kwargs...) where {F1 <: Function, F2 <: Function}

Find if the parent_node.content is supported by the provided context_trigrams.

Logic:

For diagnostics, you can use AbstractTrees.print_tree(parent_node) to see the tree structure of each token and its score.

Example

julia
\nnode = AnnotatedNode(content = "xyz")\ntrigram_support!(node, context_trigrams) # updates node.children!

source


# PromptingTools.Experimental.RAGTools.trigramsMethod.
julia
trigrams(input_string::AbstractString; add_word::AbstractString = "")

Splits provided input_string into a vector of trigrams (combination of three consecutive characters found in the input_string).

If add_word is provided, it is added to the resulting array. Useful to add the full word itself to the resulting array for exact match.
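
Example

A small sketch (the trigrams are the consecutive 3-character windows of the input; add_word appends the full word):

julia
RT.trigrams("hello"; add_word = "hello")\n# ["hel", "ell", "llo", "hello"]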

source


# PromptingTools.Experimental.RAGTools.trigrams_hashedMethod.
julia
trigrams_hashed(input_string::AbstractString; add_word::AbstractString = "")

Splits provided input_string into a Set of hashed trigrams (combination of three consecutive characters found in the input_string).

It is more efficient for lookups in large strings (eg, >100K characters).

If add_word is provided, it is added to the resulting array to hash. Useful to add the full word itself to the resulting array for exact match.
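
Example

A small sketch (the same trigrams as for trigrams, but hashed into a Set for fast lookups):

julia
RT.trigrams_hashed("hello")\n# roughly Set(hash.(["hel", "ell", "llo"]))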

source


# PromptingTools.last_messageMethod.
julia
PT.last_message(result::RAGResult)

Extract the last message from the RAGResult. It looks for final_answer first, then answer fields in the conversations dictionary. Returns nothing if not found.
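
Example

A minimal sketch (assumes result is a RAGResult, eg, from airag with return_all = true):

julia
msg = PT.last_message(result)\n# the last message from the :final_answer (or :answer) conversation, or nothing if none was generated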

source


# PromptingTools.last_outputMethod.

Extracts the last output (generated text answer) from the RAGResult.

source


# PromptingTools.pprintMethod.
julia
PromptingTools.pprint(\n    io::IO, node::AbstractAnnotatedNode;\n    text_width::Int = displaysize(io)[2], add_newline::Bool = true)

Pretty print the node to the io stream, including all its children

Supports only node.style::Styler for now.

source


# PromptingTools.pprintMethod.
julia
PT.pprint(\n    io::IO, r::AbstractRAGResult; add_context::Bool = false,\n    text_width::Int = displaysize(io)[2], annotater_kwargs...)

Pretty print the RAG result r to the given io stream.

If add_context is true, the context will be printed as well. The text_width parameter can be used to control the width of the output.

You can provide additional keyword arguments to the annotater, eg, add_sources, add_scores, min_score, etc. See annotate_support for more details.
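
Example

A minimal sketch (add_scores is one of the annotater kwargs mentioned above):

julia
PT.pprint(stdout, result; add_context = true, text_width = 100, add_scores = false)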

source



Reference for RAGTools

# PromptingTools.Experimental.RAGToolsModule.
julia
RAGTools

Provides Retrieval-Augmented Generation (RAG) functionality.

Requires: LinearAlgebra, SparseArrays, Unicode, PromptingTools for proper functionality.

This module is experimental and may change at any time. It is intended to be moved to a separate package in the future.
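
Example

A typical setup used throughout these docs (the PT/RT aliases are just a convention):

julia
using PromptingTools\nusing LinearAlgebra, SparseArrays, Unicode # enables the full RAGTools functionality\nconst PT = PromptingTools\nconst RT = PromptingTools.Experimental.RAGTools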

source


# PromptingTools.Experimental.RAGTools.AbstractCandidateChunksType.
julia
AbstractCandidateChunks

Abstract type for storing candidate chunks, ie, references to items in an AbstractChunkIndex.

Return type from find_closest and find_tags functions.

Required Fields

source


# PromptingTools.Experimental.RAGTools.AbstractChunkIndexType.
julia
AbstractChunkIndex <: AbstractDocumentIndex

Main abstract type for storing document chunks and their embeddings. It also stores tags and sources for each chunk.

Required Fields

source


# PromptingTools.Experimental.RAGTools.AbstractGeneratorType.
julia
AbstractGenerator <: AbstractGenerationMethod

Abstract type for generating an answer with generate! (use to change the process / return type of generate).

Required Fields

source


# PromptingTools.Experimental.RAGTools.AbstractIndexBuilderType.
julia
AbstractIndexBuilder

Abstract type for building an index with build_index (use to change the process / return type of build_index).

Required Fields

source


# PromptingTools.Experimental.RAGTools.AbstractMultiIndexType.
julia
AbstractMultiIndex <: AbstractDocumentIndex

Experimental abstract type for storing multiple document indexes. Not yet implemented.

source


# PromptingTools.Experimental.RAGTools.AbstractRetrieverType.
julia
AbstractRetriever <: AbstractRetrievalMethod

Abstract type for retrieving chunks from an index with retrieve (use to change the process / return type of retrieve).

Required Fields

source


# PromptingTools.Experimental.RAGTools.AdvancedGeneratorType.
julia
AdvancedGenerator <: AbstractGenerator

Default implementation for generate!. It enumerates the context snippets, runs aigenerate, and then refines the answer (via SimpleRefiner).

It uses ContextEnumerator, SimpleAnswerer, SimpleRefiner, and NoPostprocessor as default contexter, answerer, refiner, and postprocessor.

source


# PromptingTools.Experimental.RAGTools.AdvancedRetrieverType.
julia
AdvancedRetriever <: AbstractRetriever

Dispatch for retrieve with advanced retrieval methods to improve result quality. Compared to SimpleRetriever, it adds rephrasing the query and reranking the results.

Fields

source


# PromptingTools.Experimental.RAGTools.AllTagFilterType.
julia
AllTagFilter <: AbstractTagFilter

Finds the chunks that have ALL OF the specified tag(s). A method for find_tags.

source


# PromptingTools.Experimental.RAGTools.AnnotatedNodeType.
julia
AnnotatedNode{T}  <: AbstractAnnotatedNode

A node to add annotations to the generated answer in airag

Annotations can be: sources, scores, whether it's supported or not by the context, etc.

Fields

source


# PromptingTools.Experimental.RAGTools.AnyTagFilterType.
julia
AnyTagFilter <: AbstractTagFilter

Finds the chunks that have ANY OF the specified tag(s). A method for find_tags.

source


# PromptingTools.Experimental.RAGTools.BM25SimilarityType.
julia
BM25Similarity <: AbstractSimilarityFinder

Finds the closest chunks to a query by measuring the BM25 similarity between the query tokens and the chunks' keyword statistics (DocumentTermMatrix). A method for find_closest.

Reference: Wikipedia: BM25. Implementation follows: The Next Generation of Lucene Relevance.
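
Example

A hedged sketch of enabling BM25 in a retrieval config (mirrors the MultiIndex example in this reference; the chunks must be processed into a DocumentTermMatrix, eg, via KeywordsProcessor):

julia
cfg = RAGConfig(; retriever = SimpleRetriever(; processor = RT.KeywordsProcessor(), finder = RT.BM25Similarity()))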

source


# PromptingTools.Experimental.RAGTools.BatchEmbedderType.
julia
BatchEmbedder <: AbstractEmbedder

Default embedder for get_embeddings functions. It passes the documents to aiembed in batches (chunks of documents).

source


# PromptingTools.Experimental.RAGTools.BinaryBatchEmbedderType.
julia
BinaryBatchEmbedder <: AbstractEmbedder

Same as BatchEmbedder but reduces the embeddings matrix to a binary form (eg, BitMatrix). Defines a method for get_embeddings.

Reference: HuggingFace: Embedding Quantization.
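
Example

A hedged sketch of swapping the embedder in the default indexer (SimpleIndexer exposes chunker, embedder, and tagger fields per its docstring below):

julia
indexer = SimpleIndexer(; embedder = BinaryBatchEmbedder())\nindex = build_index(indexer, texts; chunker_kwargs = (; max_length = 10))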

source


# PromptingTools.Experimental.RAGTools.BinaryCosineSimilarityType.
julia
BinaryCosineSimilarity <: AbstractSimilarityFinder

Finds the closest chunks to a query embedding by measuring the Hamming distance AND cosine similarity between the query and the chunks' embeddings in binary form. A method for find_closest.

It follows the two-pass approach:

Reference: HuggingFace: Embedding Quantization.

source


# PromptingTools.Experimental.RAGTools.BitPackedBatchEmbedderType.
julia
BitPackedBatchEmbedder <: AbstractEmbedder

Same as BatchEmbedder but reduces the embeddings matrix to a binary form packed in UInt64 (eg, BitMatrix.chunks). Defines a method for get_embeddings.

See also utilities pack_bits and unpack_bits to move between packed/non-packed binary forms.

Reference: HuggingFace: Embedding Quantization.

source


# PromptingTools.Experimental.RAGTools.BitPackedCosineSimilarityType.
julia
BitPackedCosineSimilarity <: AbstractSimilarityFinder

Finds the closest chunks to a query embedding by measuring the Hamming distance AND cosine similarity between the query and the chunks' embeddings in binary form. A method for find_closest.

The difference to BinaryCosineSimilarity is that the binary values are packed into UInt64, which is more efficient.

Reference: HuggingFace: Embedding Quantization. Implementation of hamming_distance is based on TinyRAG.

source


# PromptingTools.Experimental.RAGTools.CandidateChunksType.
julia
CandidateChunks

A struct for storing references to chunks in the given index (identified by index_id). It holds the chunk positions and their scores measuring the strength of similarity (=1 is the highest, most similar). It's the result of the retrieval stage of RAG.

Fields
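
Example

A minimal construction sketch (the same call as in the build_context example in this reference):

julia
candidates = CandidateChunks(index.id, [2, 4], [0.1, 0.2])\n# positions 2 and 4 in `index` with similarity scores 0.1 and 0.2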

source


# PromptingTools.Experimental.RAGTools.ChunkEmbeddingsIndexType.
julia
ChunkEmbeddingsIndex

Main struct for storing document chunks and their embeddings. It also stores tags and sources for each chunk.

Previously, this struct was called ChunkIndex.

Fields

source


# PromptingTools.Experimental.RAGTools.ChunkKeywordsIndexType.
julia
ChunkKeywordsIndex

Struct for storing chunks of text and associated keywords for BM25 similarity search.

Fields

Example

We can easily create a keywords-based index from a standard embeddings-based index.

julia
\n# Let's assume we have a standard embeddings-based index\nindex = build_index(SimpleIndexer(), texts; chunker_kwargs = (; max_length=10))\n\n# Creating an additional index for keyword-based search (BM25), is as simple as\nindex_keywords = ChunkKeywordsIndex(index)\n\n# We can immediately create a MultiIndex (a hybrid index holding both indices)\nmulti_index = MultiIndex([index, index_keywords])

You can also build the index via build_index

julia
# given some sentences and sources\nindex_keywords = build_index(KeywordsIndexer(), sentences; chunker_kwargs=(; sources))\n\n# Retrieve closest chunks with\nretriever = SimpleBM25Retriever()\nresult = retrieve(retriever, index_keywords, "What are the best practices for parallel computing in Julia?")\nresult.context

If you want to use airag, don't forget to specify the config to make sure keywords are processed (ie, tokenized) and that BM25 is used for searching candidates

julia
cfg = RAGConfig(; retriever = SimpleBM25Retriever());\nairag(cfg, index_keywords;\n    question = "What are the best practices for parallel computing in Julia?")

source


# PromptingTools.Experimental.RAGTools.ChunkKeywordsIndexMethod.
julia
ChunkKeywordsIndex(\n    [processor::AbstractProcessor=KeywordsProcessor(),] index::ChunkEmbeddingsIndex; verbose::Int = 1,\n    index_id = gensym("ChunkKeywordsIndex"), processor_kwargs...)

Convenience method to quickly create a ChunkKeywordsIndex from an existing ChunkEmbeddingsIndex.

Example

julia
\n# Let's assume we have a standard embeddings-based index\nindex = build_index(SimpleIndexer(), texts; chunker_kwargs = (; max_length=10))\n\n# Creating an additional index for keyword-based search (BM25), is as simple as\nindex_keywords = ChunkKeywordsIndex(index)\n\n# We can immediately create a MultiIndex (a hybrid index holding both indices)\nmulti_index = MultiIndex([index, index_keywords])

source


# PromptingTools.Experimental.RAGTools.CohereRerankerType.
julia
CohereReranker <: AbstractReranker

Rerank strategy using the Cohere Rerank API. Requires an API key. A method for rerank.

source


# PromptingTools.Experimental.RAGTools.ContextEnumeratorType.
julia
ContextEnumerator <: AbstractContextBuilder

Default method for build_context! method. It simply enumerates the context snippets around each position in candidates. When possible, it will add surrounding chunks (from the same source).

source


# PromptingTools.Experimental.RAGTools.CosineSimilarityType.
julia
CosineSimilarity <: AbstractSimilarityFinder

Finds the closest chunks to a query embedding by measuring the cosine similarity between the query and the chunks' embeddings. A method for find_closest (see the docstring for more details and usage example).

source


# PromptingTools.Experimental.RAGTools.DocumentTermMatrixType.
julia
DocumentTermMatrix{T<:AbstractString}

A sparse matrix of term frequencies and document lengths to allow calculation of BM25 similarity scores.

source


# PromptingTools.Experimental.RAGTools.FileChunkerType.
julia
FileChunker <: AbstractChunker

Chunker when you provide file paths to get_chunks functions.

Ie, the inputs will be validated first (eg, file exists, etc) and then read into memory.

Set as default chunker in get_chunks functions.

source


# PromptingTools.Experimental.RAGTools.FlashRankerType.
julia
FlashRanker <: AbstractReranker

Rerank strategy using the package FlashRank.jl and local models. A method for rerank.

You must first import the FlashRank.jl package. To automatically download any required models, set your ENV["DATADEPS_ALWAYS_ACCEPT"] = true (see DataDeps for more details).

Example

julia
using FlashRank\n\n# Wrap the model to be a valid Ranker recognized by RAGTools\n# It will be provided to the airag/rerank function to avoid instantiating it on every call\nreranker = FlashRank.RankerModel(:mini) |> FlashRanker\n# You can choose :tiny or :mini\n\n## Apply to the pipeline configuration, eg, \ncfg = RAGConfig(; retriever = AdvancedRetriever(; reranker))\n\n# Ask a question (assumes you have some `index`)\nquestion = "What are the best practices for parallel computing in Julia?"\nresult = airag(cfg, index; question, return_all = true)

source


# PromptingTools.Experimental.RAGTools.HTMLStylerType.
julia
HTMLStyler

Defines styling via classes (attribute class) and styles (attribute style) for HTML formatting of AbstractAnnotatedNode
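
Example

A minimal sketch (the same stylers as used in the print_html example in this reference):

julia
default_styler = RT.HTMLStyler()\nlow_styler = RT.HTMLStyler(styles = "color:magenta", classes = "")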

source


# PromptingTools.Experimental.RAGTools.HyDERephraserType.
julia
HyDERephraser <: AbstractRephraser

Rephraser implemented using the provided AI Template (eg, ...) and standard chat model. A method for rephrase.

It uses a prompt-based rephrasing method called HyDE (Hypothetical Document Embedding), where instead of looking for an embedding of the question, we look for the documents most similar to a synthetic passage that would be a good answer to our question.

Reference: Arxiv paper.

source


# PromptingTools.Experimental.RAGTools.JudgeAllScoresType.

final_rating is the average of all scoring criteria. Explain the final_rating in rationale

source


# PromptingTools.Experimental.RAGTools.JudgeRatingType.

Provide the final_rating between 1-5. Provide the rationale for it.

source


# PromptingTools.Experimental.RAGTools.KeywordsIndexerType.
julia
KeywordsIndexer <: AbstractIndexBuilder

Keyword-based index (BM25) to be returned by build_index.

It uses TextChunker, KeywordsProcessor, and NoTagger as default chunker, processor, and tagger.

source


# PromptingTools.Experimental.RAGTools.KeywordsProcessorType.
julia
KeywordsProcessor <: AbstractProcessor

Default keywords processor for get_keywords functions. It normalizes the documents, tokenizes them and builds a DocumentTermMatrix.

source


# PromptingTools.Experimental.RAGTools.MultiCandidateChunksType.
julia
MultiCandidateChunks

A struct for storing references to multiple sets of chunks across different indices. Each set of chunks is identified by an index_id in index_ids, with corresponding positions in the index and scores indicating the strength of similarity.

This struct is useful for scenarios where candidates are drawn from multiple indices, and there is a need to keep track of which candidates came from which index.

Fields

source


# PromptingTools.Experimental.RAGTools.MultiFinderType.
julia
MultiFinder <: AbstractSimilarityFinder

Composite finder for MultiIndex where we want to set multiple finders for each index. A method for find_closest. Positions correspond to indexes(::MultiIndex).

source


# PromptingTools.Experimental.RAGTools.MultiIndexType.
julia
MultiIndex

Composite index that stores multiple ChunkIndex objects and their embeddings.

Fields

Use the accessor indexes to access the individual indexes.

Examples

We can create a MultiIndex from a vector of AbstractChunkIndex objects.

julia
index = build_index(SimpleIndexer(), texts; chunker_kwargs = (; sources))\nindex_keywords = ChunkKeywordsIndex(index) # same chunks as above but adds BM25 instead of embeddings\n\nmulti_index = MultiIndex([index, index_keywords])

To use airag with different types of indices, we need to specify how to find the closest items for each index

julia
# Cosine similarity for embeddings and BM25 for keywords, same order as indexes in MultiIndex\nfinder = RT.MultiFinder([RT.CosineSimilarity(), RT.BM25Similarity()])\n\n# Notice that we add `processor` to make sure keywords are processed (ie, tokenized) as well\ncfg = RAGConfig(; retriever = SimpleRetriever(; processor = RT.KeywordsProcessor(), finder))\n\n# Ask questions\nmsg = airag(cfg, multi_index; question = "What are the best practices for parallel computing in Julia?")\npprint(msg) # prettify the answer

source


# PromptingTools.Experimental.RAGTools.NoEmbedderType.
julia
NoEmbedder <: AbstractEmbedder

No-op embedder for get_embeddings functions. It returns nothing.

source


# PromptingTools.Experimental.RAGTools.NoPostprocessorType.
julia
NoPostprocessor <: AbstractPostprocessor

Default method for postprocess! method. A passthrough option that returns the result without any changes.

Overload this method to add custom postprocessing steps, eg, logging, saving conversations to disk, etc.

source


# PromptingTools.Experimental.RAGTools.NoProcessorType.
julia
NoProcessor <: AbstractProcessor

No-op processor for get_keywords functions. It returns the inputs as is.

source


# PromptingTools.Experimental.RAGTools.NoRefinerType.
julia
NoRefiner <: AbstractRefiner

Default method for refine! method. A passthrough option that returns the result.answer without any changes.

source


# PromptingTools.Experimental.RAGTools.NoRephraserType.
julia
NoRephraser <: AbstractRephraser

No-op implementation for rephrase, which simply passes the question through.

source


# PromptingTools.Experimental.RAGTools.NoRerankerType.
julia
NoReranker <: AbstractReranker

No-op implementation for rerank, which simply passes the candidate chunks through.

source


# PromptingTools.Experimental.RAGTools.NoTagFilterType.
julia
NoTagFilter <: AbstractTagFilter

No-op implementation for find_tags, which simply returns all chunks.

source


# PromptingTools.Experimental.RAGTools.NoTaggerType.
julia
NoTagger <: AbstractTagger

No-op tagger for get_tags functions. It returns (nothing, nothing).

source


# PromptingTools.Experimental.RAGTools.OpenTaggerType.
julia
OpenTagger <: AbstractTagger

Tagger for get_tags functions, which generates possible tags for each chunk via aiextract. You can customize it via prompt template (default: :RAGExtractMetadataShort), but it's quite open-ended (ie, AI decides the possible tags).

source


# PromptingTools.Experimental.RAGTools.PassthroughTaggerType.
julia
PassthroughTagger <: AbstractTagger

Tagger for get_tags functions, which passes tags directly as Vector of Vectors of strings (ie, tags[i] is the tags for docs[i]).

source


# PromptingTools.Experimental.RAGTools.RAGConfigType.
julia
RAGConfig <: AbstractRAGConfig

Default configuration for RAG. It uses SimpleIndexer, SimpleRetriever, and SimpleGenerator as default components. Provided as the first argument in airag.

To customize the components, replace corresponding fields for each step of the RAG pipeline (eg, use subtypes(AbstractIndexBuilder) to find the available options).

source


# PromptingTools.Experimental.RAGTools.RAGResultType.
julia
RAGResult

A struct for debugging RAG answers. It contains the question, answer, context, and the candidate chunks at each step of the RAG pipeline.

Think of the flow as question -> rephrased_questions -> answer -> final_answer with the context and candidate chunks helping along the way.

Fields

See also: pprint (pretty printing), annotate_support (for annotating the answer)

source


# PromptingTools.Experimental.RAGTools.RankGPTRerankerType.
julia
RankGPTReranker <: AbstractReranker

Rerank strategy using the RankGPT algorithm (calling LLMs). A method for rerank.

Reference

[1] Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agents by W. Sun et al. [2] RankGPT Github

source


# PromptingTools.Experimental.RAGTools.RankGPTResultType.
julia
RankGPTResult

Results from the RankGPT algorithm.

Fields

source


# PromptingTools.Experimental.RAGTools.SimpleAnswererType.
julia
SimpleAnswerer <: AbstractAnswerer

Default method for answer! method. Generates an answer using the aigenerate function with the provided context and question.

source


# PromptingTools.Experimental.RAGTools.SimpleBM25RetrieverType.
julia
SimpleBM25Retriever <: AbstractRetriever

Keyword-based implementation for retrieve. It does a simple similarity search via BM25Similarity and returns the results.

Make sure to use consistent processor and tagger with the Preparation Stage (build_index)!

Fields

source


# PromptingTools.Experimental.RAGTools.SimpleGeneratorType.
julia
SimpleGenerator <: AbstractGenerator

Default implementation for generate. It simply enumerates context snippets and runs aigenerate (no refinement).

It uses ContextEnumerator, SimpleAnswerer, NoRefiner, and NoPostprocessor as default contexter, answerer, refiner, and postprocessor.

source


# PromptingTools.Experimental.RAGTools.SimpleIndexerType.
julia
SimpleIndexer <: AbstractIndexBuilder

Default implementation for build_index.

It uses TextChunker, BatchEmbedder, and NoTagger as default chunker, embedder, and tagger.

source


# PromptingTools.Experimental.RAGTools.SimpleRefinerType.
julia
SimpleRefiner <: AbstractRefiner

Refines the answer using the same context previously provided via the provided prompt template. A method for refine!.

source


# PromptingTools.Experimental.RAGTools.SimpleRephraserType.
julia
SimpleRephraser <: AbstractRephraser

Rephraser implemented using the provided AI Template (eg, ...) and standard chat model. A method for rephrase.

source


# PromptingTools.Experimental.RAGTools.SimpleRetrieverType.
julia
SimpleRetriever <: AbstractRetriever

Default implementation for retrieve function. It does a simple similarity search via CosineSimilarity and returns the results.

Make sure to use consistent embedder and tagger with the Preparation Stage (build_index)!

Fields

source


# PromptingTools.Experimental.RAGTools.StylerType.
julia
Styler

Defines styling keywords for printstyled for each AbstractAnnotatedNode

source


# PromptingTools.Experimental.RAGTools.SubChunkIndexType.
julia
SubChunkIndex

A view of the parent index with respect to the chunks (and chunk-aligned fields). All methods and accessors working for AbstractChunkIndex also work for SubChunkIndex. It does not yet work for MultiIndex.

Fields

Example

julia
cc = CandidateChunks(index.id, 1:10)\nsub_index = @view(index[cc])

You can use SubChunkIndex to access chunks or sources (and other fields) from a parent index, eg,

julia
RT.chunks(sub_index)\nRT.sources(sub_index)\nRT.chunkdata(sub_index) # slice of embeddings\nRT.embeddings(sub_index) # slice of embeddings\nRT.tags(sub_index) # slice of tags\nRT.tags_vocab(sub_index) # unchanged, identical to parent version\nRT.extras(sub_index) # slice of extras

Access the parent index that the positions correspond to

julia
parent(sub_index)\nRT.positions(sub_index)

source


# PromptingTools.Experimental.RAGTools.SubDocumentTermMatrixType.

A partial view of a DocumentTermMatrix, tf is MATERIALIZED for performance and fewer allocations.

source


# PromptingTools.Experimental.RAGTools.TavilySearchRefinerType.
julia
TavilySearchRefiner <: AbstractRefiner

Refines the answer by executing a web search using the Tavily API. This method aims to enhance the answer's accuracy and relevance by incorporating information retrieved from the web. A method for refine!.

source


# PromptingTools.Experimental.RAGTools.TextChunkerType.
julia
TextChunker <: AbstractChunker

Chunker when you provide text to get_chunks functions. Inputs are directly chunked

source


# PromptingTools.Experimental.RAGTools.TrigramAnnotaterType.
julia
TrigramAnnotater

Annotation method where we score answer versus each context based on word-level trigrams that match.

It's a very simple method (and it can lose some semantic meaning in longer sequences, eg, negations), but it works reasonably well for both text and code.
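
Example

A minimal sketch (see annotate_support below for the full usage and kwargs):

julia
annotater = TrigramAnnotater()\nannotated_root = annotate_support(annotater, answer, context)\npprint(annotated_root)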

source


# PromptingTools.Experimental.RAGTools._normalizeFunction.

Shortcut to LinearAlgebra.normalize. Provided in the package extension RAGToolsExperimentalExt (Requires SparseArrays, Unicode, and LinearAlgebra)

source


# PromptingTools.Experimental.RAGTools.add_node_metadata!Method.
julia
add_node_metadata!(annotater::TrigramAnnotater,\n    root::AnnotatedNode; add_sources::Bool = true, add_scores::Bool = true,\n    sources::Union{Nothing, AbstractVector{<:AbstractString}} = nothing)

Adds metadata to the children of root. Metadata includes sources and scores, if requested.

Optionally, it can add a list of sources at the end of the printed text.

The metadata is added by inserting new nodes into the root's children list (each with no children of its own, so it will be printed out).

source


# PromptingTools.Experimental.RAGTools.airagMethod.
julia
airag(cfg::AbstractRAGConfig, index::AbstractDocumentIndex;\n    question::AbstractString,\n    verbose::Integer = 1, return_all::Bool = false,\n    api_kwargs::NamedTuple = NamedTuple(),\n    retriever::AbstractRetriever = cfg.retriever,\n    retriever_kwargs::NamedTuple = NamedTuple(),\n    generator::AbstractGenerator = cfg.generator,\n    generator_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

High-level wrapper for Retrieval-Augmented Generation (RAG), it combines together the retrieve and generate! steps which you can customize if needed.

The simplest version first finds the relevant chunks in index for the question and then sends these chunks to the AI model to help with generating a response to the question.

To customize the components, replace the types (retriever, generator) of the corresponding step of the RAG pipeline - or go into sub-routines within the steps. Eg, use subtypes(AbstractRetriever) to find the available options.

Arguments

Returns

See also build_index, retrieve, generate!, RAGResult, getpropertynested, setpropertynested, merge_kwargs_nested, ChunkKeywordsIndex.

Examples

Using airag to get a response for a question:

julia
index = build_index(...)  # create an index\nquestion = "How to make a barplot in Makie.jl?"\nmsg = airag(index; question)

To understand the details of the RAG process, use return_all=true

julia
msg, details = airag(index; question, return_all = true)\n# details is a RAGResult object with all the internal steps of the `airag` function

You can also pretty-print details to highlight generated text vs text that is supported by context. It also includes annotations of which context was used for each part of the response (where available).

julia
PT.pprint(details)

Example with advanced retrieval (question rephrasing and reranking; requires COHERE_API_KEY). We will obtain the top 100 chunks from embeddings (top_k) and the top 5 chunks from reranking (top_n). In addition, it will be done with a "custom" locally-hosted model.

julia
cfg = RAGConfig(; retriever = AdvancedRetriever())\n\n# kwargs will be big and nested, let's prepare them upfront\n# we specify "custom" model for each component that calls LLM\nkwargs = (\n    retriever_kwargs = (;\n        top_k = 100,\n        top_n = 5,\n        rephraser_kwargs = (;\n            model = "custom"),\n        embedder_kwargs = (;\n            model = "custom"),\n        tagger_kwargs = (;\n            model = "custom")),\n    generator_kwargs = (;\n        answerer_kwargs = (;\n            model = "custom"),\n        refiner_kwargs = (;\n            model = "custom")),\n    api_kwargs = (;\n        url = "http://localhost:8080"))\n\nresult = airag(cfg, index, question; kwargs...)

If you want to use hybrid retrieval (embeddings + BM25), you can easily create an additional index based on keywords and pass them both into a MultiIndex.

You need to provide an explicit config, so the pipeline knows how to handle each index in the search similarity phase (finder).

julia
index = # your existing index\n\n# create the multi-index with the keywords index\nindex_keywords = ChunkKeywordsIndex(index)\nmulti_index = MultiIndex([index, index_keywords])\n\n# define the similarity measures for the indices that you have (same order)\nfinder = RT.MultiFinder([RT.CosineSimilarity(), RT.BM25Similarity()])\ncfg = RAGConfig(; retriever=AdvancedRetriever(; processor=RT.KeywordsProcessor(), finder))\n\n# Run the pipeline with the new hybrid retrieval (return the `RAGResult` to see the details)\nresult = airag(cfg, multi_index; question, return_all=true)\n\n# Pretty-print the result\nPT.pprint(result)

For easier manipulation of nested kwargs, see utilities getpropertynested, setpropertynested, merge_kwargs_nested.

source


# PromptingTools.Experimental.RAGTools.align_node_styles!Method.
julia
align_node_styles!(annotater::TrigramAnnotater, nodes::AbstractVector{<:AnnotatedNode}; kwargs...)

Aligns the styles of the nodes based on the surrounding nodes ("fill-in-the-middle").

If the node has no score, but the surrounding nodes have the same style, the node will inherit the style of the surrounding nodes.

source


# PromptingTools.Experimental.RAGTools.annotate_supportMethod.
julia
annotate_support(annotater::TrigramAnnotater, answer::AbstractString,\n    context::AbstractVector; min_score::Float64 = 0.5,\n    skip_trigrams::Bool = true, hashed::Bool = true,\n    sources::Union{Nothing, AbstractVector{<:AbstractString}} = nothing,\n    min_source_score::Float64 = 0.25,\n    add_sources::Bool = true,\n    add_scores::Bool = true, kwargs...)

Annotates the answer with the overlap/what's supported in context and returns the annotated tree of nodes representing the answer

Returns a "root" node with children nodes representing the sentences/code blocks in the answer. Only the "leaf" nodes are to be printed (to avoid duplication), "leaf" nodes are those with NO children.

Default logic:

Arguments

Example

julia
annotater = TrigramAnnotater()\ncontext = [\n    "This is a test context.", "Another context sentence.", "Final piece of context."]\nanswer = "This is a test context. Another context sentence."\n\nannotated_root = annotate_support(annotater, answer, context)\npprint(annotated_root) # pretty print the annotated tree

source


# PromptingTools.Experimental.RAGTools.annotate_supportMethod.
julia
annotate_support(\n    annotater::TrigramAnnotater, result::AbstractRAGResult; min_score::Float64 = 0.5,\n    skip_trigrams::Bool = true, hashed::Bool = true,\n    min_source_score::Float64 = 0.25,\n    add_sources::Bool = true,\n    add_scores::Bool = true, kwargs...)

Dispatch for annotate_support for AbstractRAGResult type. It extracts the final_answer and context from the result and calls annotate_support with them.

See annotate_support for more details.

Example

julia
res = RAGResult(; question = "", final_answer = "This is a test.",\n    context = ["Test context.", "Completely different"])\nannotated_root = annotate_support(annotater, res)\nPT.pprint(annotated_root)

source


# PromptingTools.Experimental.RAGTools.answer!Method.
julia
answer!(\n    answerer::SimpleAnswerer, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    model::AbstractString = PT.MODEL_CHAT, verbose::Bool = true,\n    template::Symbol = :RAGAnswerFromContext,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Generates an answer using the aigenerate function with the provided result.context and result.question.

Returns

Arguments
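
Example

A minimal sketch (assumes index and a result from retrieve already exist; default template and model as listed above):

julia
result = answer!(SimpleAnswerer(), index, result)\n# the answer is stored in result.answer and result.conversations[:answer]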

source


# PromptingTools.Experimental.RAGTools.build_contextMethod.
julia
build_context(contexter::ContextEnumerator,\n    index::AbstractDocumentIndex, candidates::AbstractCandidateChunks;\n    verbose::Bool = true,\n    chunks_window_margin::Tuple{Int, Int} = (1, 1), kwargs...)\n\n    build_context!(contexter::ContextEnumerator,\n    index::AbstractDocumentIndex, result::AbstractRAGResult; kwargs...)

Build context strings for each position in candidates considering a window margin around each position. If mutating version is used (build_context!), it will use result.reranked_candidates to update the result.context field.

Arguments

Returns

Examples

julia
index = ChunkIndex(...)  # Assuming a proper index is defined\ncandidates = CandidateChunks(index.id, [2, 4], [0.1, 0.2])\ncontext = build_context(ContextEnumerator(), index, candidates; chunks_window_margin=(0, 1)) # include only one following chunk for each matching chunk

source


# PromptingTools.Experimental.RAGTools.build_indexMethod.
julia
build_index(\n    indexer::KeywordsIndexer, files_or_docs::Vector{<:AbstractString};\n    verbose::Integer = 1,\n    extras::Union{Nothing, AbstractVector} = nothing,\n    index_id = gensym("ChunkKeywordsIndex"),\n    chunker::AbstractChunker = indexer.chunker,\n    chunker_kwargs::NamedTuple = NamedTuple(),\n    processor::AbstractProcessor = indexer.processor,\n    processor_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = indexer.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    api_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Builds a ChunkKeywordsIndex from the provided files or documents to support keyword-based search (BM25).
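
Example

The same usage as shown for ChunkKeywordsIndex above:

julia
# given some sentences and sources\nindex_keywords = build_index(KeywordsIndexer(), sentences; chunker_kwargs = (; sources))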

source


# PromptingTools.Experimental.RAGTools.build_indexMethod.
julia
build_index(\n    indexer::AbstractIndexBuilder, files_or_docs::Vector{<:AbstractString};\n    verbose::Integer = 1,\n    extras::Union{Nothing, AbstractVector} = nothing,\n    index_id = gensym("ChunkEmbeddingsIndex"),\n    chunker::AbstractChunker = indexer.chunker,\n    chunker_kwargs::NamedTuple = NamedTuple(),\n    embedder::AbstractEmbedder = indexer.embedder,\n    embedder_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = indexer.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    api_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Build an INDEX for RAG (Retrieval-Augmented Generation) applications from the provided file paths. The INDEX is an object storing the document chunks and their embeddings (and potentially other information).

The function processes each file or document (depending on chunker), splits its content into chunks, embeds these chunks, optionally extracts metadata, and then combines this information into a retrievable index.

Define your own methods via indexer and its subcomponents (chunker, embedder, tagger).

Arguments

Returns

See also: ChunkEmbeddingsIndex, get_chunks, get_embeddings, get_tags, CandidateChunks, find_closest, find_tags, rerank, retrieve, generate!, airag

Examples

julia
# Default is loading a vector of strings and chunking them (`TextChunker()`)\nindex = build_index(SimpleIndexer(), texts; chunker_kwargs = (; max_length=10))\n\n# Another example with tags extraction, splitting only sentences and verbose output\n# Assuming `test_files` is a vector of file paths\nindexer = SimpleIndexer(chunker=FileChunker(), tagger=OpenTagger())\nindex = build_index(indexer, test_files;\n        chunker_kwargs = (; separators=[". "]), verbose=true)

Notes

source


# PromptingTools.Experimental.RAGTools.build_qa_evalsMethod.
julia
build_qa_evals(doc_chunks::Vector{<:AbstractString}, sources::Vector{<:AbstractString};\n               model=PT.MODEL_CHAT, instructions="None.", qa_template::Symbol=:RAGCreateQAFromContext, \n               verbose::Bool=true, api_kwargs::NamedTuple = NamedTuple(), kwargs...) -> Vector{QAEvalItem}

Create a collection of question and answer evaluations (QAEvalItem) from document chunks and sources. This function generates Q&A pairs based on the provided document chunks, using a specified AI model and template.

Arguments

Returns

Vector{QAEvalItem}: A vector of QAEvalItem structs, each containing a source, context, question, and answer. Invalid or empty items are filtered out.

Notes

Examples

Creating Q&A evaluations from a set of document chunks:

julia
doc_chunks = ["Text from document 1", "Text from document 2"]\nsources = ["source1", "source2"]\nqa_evals = build_qa_evals(doc_chunks, sources)

source


# PromptingTools.Experimental.RAGTools.build_tagsFunction.

Builds a matrix of tags and a vocabulary list. REQUIRES the SparseArrays, LinearAlgebra, and Unicode packages to be loaded!

source


# PromptingTools.Experimental.RAGTools.build_tagsMethod.
julia
build_tags(tagger::AbstractTagger, chunk_tags::Nothing; kwargs...)

No-op that skips any tag building, returning (nothing, nothing).

Otherwise, it would build the sparse matrix and the vocabulary (requires SparseArrays and LinearAlgebra packages to be loaded).

source


# PromptingTools.Experimental.RAGTools.chunkdataMethod.

Access chunkdata for a subset of chunks; chunk_idx is a vector of chunk indices in the index.

source


# PromptingTools.Experimental.RAGTools.chunkdataMethod.

Access chunkdata for a subset of chunks; chunk_idx is a vector of chunk indices in the index.

source


# PromptingTools.Experimental.RAGTools.chunkdataMethod.

Access chunkdata for a subset of chunks; chunk_idx is a vector of chunk indices in the index.

source


# PromptingTools.Experimental.RAGTools.cohere_apiMethod.
julia
cohere_api(;\napi_key::AbstractString,\nendpoint::String,\nurl::AbstractString="https://api.cohere.ai/v1",\nhttp_kwargs::NamedTuple=NamedTuple(),\nkwargs...)

Lightweight wrapper around the Cohere API. See https://cohere.com/docs for more details.

Arguments
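
Example

A hedged sketch of calling the rerank endpoint directly; the request fields (query, documents, top_n) follow the public Cohere HTTP API and are assumed to be passed through as keyword arguments (not verified here):

julia
r = cohere_api(; api_key = PT.COHERE_API_KEY, endpoint = "rerank",\n    query = question, documents = chunks, top_n = 3)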

source


# PromptingTools.Experimental.RAGTools.create_permutation_instructionMethod.
julia
create_permutation_instruction(\n    context::AbstractVector{<:AbstractString}; rank_start::Integer = 1,\n    rank_end::Integer = 100, max_length::Integer = 512, template::Symbol = :RAGRankGPT)

Creates rendered template with injected context passages.

source


# PromptingTools.Experimental.RAGTools.extract_rankingMethod.
julia
extract_ranking(str::AbstractString)

Extracts the ranking from the response into a sorted array of integers.

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::BinaryCosineSimilarity, emb::AbstractMatrix{<:Bool},\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, rescore_multiplier::Int = 4, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by embeddings in emb) that are closest to query embedding (query_emb) using binary embeddings (in the index).

This is a two-pass approach:

Returns only top_k closest indices.

Reference: HuggingFace: Embedding Quantization.

Examples

Convert any Float embeddings to binary like this:

julia
binary_emb = map(>(0), emb)

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::AbstractSimilarityFinder, index::AbstractChunkIndex,\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, kwargs...)

Finds the indices of chunks (represented by embeddings in index) that are closest to query embedding (query_emb).

Returns only top_k closest indices.

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::CosineSimilarity, emb::AbstractMatrix{<:Real},\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by embeddings in emb) that are closest (in cosine similarity for CosineSimilarity()) to query embedding (query_emb).

finder is the logic used for the similarity search. Default is CosineSimilarity.

If minimum_similarity is provided, only indices with similarity greater than or equal to it are returned. Similarity can be between -1 and 1 (-1 = completely opposite, 1 = exactly the same).

Returns only top_k closest indices.
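
Example

A hedged sketch of the low-level method (assumes emb has one embedding column per chunk and query_emb matches its dimension; the return of positions and scores is inferred from the description above):

julia
positions, scores = find_closest(CosineSimilarity(), emb, query_emb; top_k = 10, minimum_similarity = 0.5)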

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::BM25Similarity, dtm::AbstractDocumentTermMatrix,\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by DocumentTermMatrix in dtm) that are closest to query tokens (query_tokens) using BM25.

Reference: Wikipedia: BM25. Implementation follows: The Next Generation of Lucene Relevance.

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(\n    finder::BitPackedCosineSimilarity, emb::AbstractMatrix{<:Bool},\n    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];\n    top_k::Int = 100, rescore_multiplier::Int = 4, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by embeddings in emb) that are closest to query embedding (query_emb) using bit-packed binary embeddings (in the index).

This is a two-pass approach:

Returns only top_k closest indices.

Reference: HuggingFace: Embedding Quantization.

Examples

Convert any Float embeddings to bit-packed binary like this:

julia
bitpacked_emb = pack_bits(emb.>0)

source


# PromptingTools.Experimental.RAGTools.find_tagsMethod.
julia
find_tags(method::AnyTagFilter, index::AbstractChunkIndex,\n    tag::Union{AbstractString, Regex}; kwargs...)\n\nfind_tags(method::AnyTagFilter, index::AbstractChunkIndex,\n    tags::Vector{T}; kwargs...) where {T <: Union{AbstractString, Regex}}

Finds the indices of chunks (represented by tags in index) that have ANY OF the specified tag or tags.
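
Example

A hedged sketch (the index and tag values are assumed; the index must have been built with a tagger so it carries tags):

julia
# assumes RAGTools names are in scope, eg, via: using PromptingTools.Experimental.RAGTools
# `index` is a hypothetical ChunkIndex built with a tagger
candidates = find_tags(AnyTagFilter(), index, ["julia", "programming"])  # chunks carrying either tag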

source


# PromptingTools.Experimental.RAGTools.find_tagsMethod.
julia
find_tags(method::AllTagFilter, index::AbstractChunkIndex,\n    tag::Union{AbstractString, Regex}; kwargs...)\n\nfind_tags(method::AllTagFilter, index::AbstractChunkIndex,\n    tags::Vector{T}; kwargs...) where {T <: Union{AbstractString, Regex}}

Finds the indices of chunks (represented by tags in index) that have ALL OF the specified tag or tags.

source


# PromptingTools.Experimental.RAGTools.find_tagsMethod.
julia
find_tags(method::NoTagFilter, index::AbstractChunkIndex,\n    tags::Union{T, AbstractVector{<:T}}; kwargs...) where {T <:\n                                                           Union{\n    AbstractString, Regex, Nothing}}\n    tags; kwargs...)

Returns all chunks in the index, ie, no filtering, so we simply return nothing (easier for dispatch).

source


# PromptingTools.Experimental.RAGTools.generate!Method.
julia
generate!(\n    generator::AbstractGenerator, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    verbose::Integer = 1,\n    api_kwargs::NamedTuple = NamedTuple(),\n    contexter::AbstractContextBuilder = generator.contexter,\n    contexter_kwargs::NamedTuple = NamedTuple(),\n    answerer::AbstractAnswerer = generator.answerer,\n    answerer_kwargs::NamedTuple = NamedTuple(),\n    refiner::AbstractRefiner = generator.refiner,\n    refiner_kwargs::NamedTuple = NamedTuple(),\n    postprocessor::AbstractPostprocessor = generator.postprocessor,\n    postprocessor_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Generate the response using the provided generator and the index and result. It is the second step in the RAG pipeline (after retrieve)

Returns the mutated result with the result.final_answer and the full conversation saved in result.conversations[:final_answer].

Notes

Arguments

See also: retrieve, build_context!, ContextEnumerator, answer!, SimpleAnswerer, refine!, NoRefiner, SimpleRefiner, postprocess!, NoPostprocessor

Examples

julia
Assume we already have `index`\n\nquestion = "What are the best practices for parallel computing in Julia?"\n\n# Retrieve the relevant chunks - returns RAGResult\nresult = retrieve(index, question)\n\n# Generate the answer using the default generator, mutates the same result\nresult = generate!(index, result)

source


# PromptingTools.Experimental.RAGTools.get_chunksMethod.
julia
get_chunks(chunker::AbstractChunker,\n    files_or_docs::Vector{<:AbstractString};\n    sources::AbstractVector{<:AbstractString} = files_or_docs,\n    verbose::Bool = true,\n    separators = ["\\n\\n", ". ", "\\n", " "], max_length::Int = 256)

Chunks the provided files_or_docs into chunks of maximum length max_length (if possible with provided separators).

Supports two modes of operation:

Arguments
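
Example

A minimal sketch chunking two in-memory strings (the document strings are made up; TextChunker is used for in-memory text and the chunks/sources return pair is an assumption here):

julia
# assumes RAGTools names are in scope, eg, via: using PromptingTools.Experimental.RAGTools
docs = ["First document text to be chunked...", "Second document text..."]
chunks, sources = get_chunks(TextChunker(), docs;
    sources = ["doc1", "doc2"], max_length = 256)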

source


# PromptingTools.Experimental.RAGTools.get_embeddingsMethod.
julia
get_embeddings(embedder::BatchEmbedder, docs::AbstractVector{<:AbstractString};\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_EMBEDDING,\n    truncate_dimension::Union{Int, Nothing} = nothing,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    target_batch_size_length::Int = 80_000,\n    ntasks::Int = 4 * Threads.nthreads(),\n    kwargs...)

Embeds a vector of docs using the provided model (kwarg model) in a batched manner - BatchEmbedder.

BatchEmbedder tries to batch embedding calls for roughly 80K characters per call (to avoid exceeding the API rate limit) to reduce network latency.

Notes

Arguments
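
Example

A hedged sketch (requires a valid API key for the embedding provider; the model name is illustrative):

julia
# assumes RAGTools names are in scope, eg, via: using PromptingTools.Experimental.RAGTools
docs = ["The quick brown fox.", "Julia is fast."]
emb = get_embeddings(BatchEmbedder(), docs; model = "text-embedding-3-small")
size(emb)  # (embedding_dimension, 2) - one column per document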

source


# PromptingTools.Experimental.RAGTools.get_embeddingsMethod.
julia
get_embeddings(embedder::BinaryBatchEmbedder, docs::AbstractVector{<:AbstractString};\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_EMBEDDING,\n    truncate_dimension::Union{Int, Nothing} = nothing,\n    return_type::Type = Matrix{Bool},\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    target_batch_size_length::Int = 80_000,\n    ntasks::Int = 4 * Threads.nthreads(),\n    kwargs...)

Embeds a vector of docs using the provided model (kwarg model) in a batched manner and then returns the binary embeddings matrix - BinaryBatchEmbedder.

BinaryBatchEmbedder tries to batch embedding calls for roughly 80K characters per call (to avoid exceeding the API rate limit) to reduce network latency.

Notes

Arguments

source


# PromptingTools.Experimental.RAGTools.get_embeddingsMethod.
julia
get_embeddings(embedder::BitPackedBatchEmbedder, docs::AbstractVector{<:AbstractString};\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_EMBEDDING,\n    truncate_dimension::Union{Int, Nothing} = nothing,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    target_batch_size_length::Int = 80_000,\n    ntasks::Int = 4 * Threads.nthreads(),\n    kwargs...)

Embeds a vector of docs using the provided model (kwarg model) in a batched manner and then returns the binary embeddings matrix represented in UInt64 (bit-packed) - BitPackedBatchEmbedder.

BitPackedBatchEmbedder tries to batch embedding calls for roughly 80K characters per call (to avoid exceeding the API rate limit) to reduce network latency.

The best option for FAST and MEMORY-EFFICIENT storage of embeddings, for retrieval use BitPackedCosineSimilarity.

Notes

Arguments

See also: unpack_bits, pack_bits, BitPackedCosineSimilarity.

source


# PromptingTools.Experimental.RAGTools.get_tagsMethod.
julia
get_tags(tagger::NoTagger, docs::AbstractVector{<:AbstractString};\n    kwargs...)

Simple no-op that skips any tagging of the documents

source


# PromptingTools.Experimental.RAGTools.get_tagsMethod.
julia
get_tags(tagger::OpenTagger, docs::AbstractVector{<:AbstractString};\n    verbose::Bool = true,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Extracts "tags" (metadata/keywords) from a vector of docs using the provided model (kwarg model).

Arguments

source


# PromptingTools.Experimental.RAGTools.get_tagsMethod.
julia
get_tags(tagger::PassthroughTagger, docs::AbstractVector{<:AbstractString};\n    tags::AbstractVector{<:AbstractVector{<:AbstractString}},\n    kwargs...)

Pass tags directly as Vector of Vectors of strings (ie, tags[i] is the tags for docs[i]). It then builds the vocabulary from the tags and returns both the tags in matrix form and the vocabulary.

source


# PromptingTools.Experimental.RAGTools.getpropertynestedFunction.
julia
getpropertynested(\n    nt::NamedTuple, parent_keys::Vector{Symbol}, key::Symbol, default = nothing)

Get a property key from a nested NamedTuple nt, where the property is nested to a key in parent_keys.

Useful for nested kwargs where we want to get some property in parent_keys subset (eg, model in retriever_kwargs).

Examples

julia
kw = (; abc = (; def = "x"))\ngetpropertynested(kw, [:abc], :def)\n# Output: "x"

source


# PromptingTools.Experimental.RAGTools.hamming_distanceMethod.
julia
hamming_distance(\n    mat::AbstractMatrix{T}, query::AbstractVector{T})::Vector{Int} where {T <: Integer}

Calculates the column-wise Hamming distance between a matrix of binary vectors mat and a single binary query vector query.

This is the first-pass ranking for the BinaryCosineSimilarity method.

Implementation from domluna's tinyRAG.
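
Example

A minimal sketch with random binary data (the sizes are arbitrary):

julia
# assumes RAGTools names are in scope, eg, via: using PromptingTools.Experimental.RAGTools
mat = rand(Bool, 128, 1_000)           # 1_000 binary embeddings of dimension 128
query = rand(Bool, 128)                # a single binary query vector
dists = hamming_distance(mat, query)   # Vector{Int} of length 1_000; smaller = closer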

source


# PromptingTools.Experimental.RAGTools.hcat_truncateMethod.
julia
hcat_truncate(matrices::AbstractVector{<:AbstractMatrix{T}},\n    truncate_dimension::Union{Nothing, Int} = nothing; verbose::Bool = false) where {T <:\n                                                                                     Real}

Horizontal concatenation of matrices, with optional truncation of the rows of each matrix to the specified dimension (reducing embedding dimensionality).

More efficient than simple splatting (eg, hcat(matrices...)), as the resulting matrix is pre-allocated in one go.

Returns: a Matrix{Float32}

Arguments

Examples

julia
a = rand(Float32, 1000, 10)\nb = rand(Float32, 1000, 20)\n\nc = hcat_truncate([a, b])\nsize(c) # (1000, 30)\n\nd = hcat_truncate([a, b], 500)\nsize(d) # (500, 30)

source


# PromptingTools.Experimental.RAGTools.load_textMethod.
julia
load_text(chunker::AbstractChunker, input;\n    kwargs...)

Load text from input using the provided chunker. Called by get_chunks.

Available chunkers:

source


# PromptingTools.Experimental.RAGTools.merge_kwargs_nestedMethod.
julia
merge_kwargs_nested(nt1::NamedTuple, nt2::NamedTuple)

Merges two nested NamedTuples nt1 and nt2 recursively. The nt2 values will overwrite the nt1 values when overlapping.

Example

julia
kw = (; abc = (; def = "x"))\nkw2 = (; abc = (; def = "x", def2 = 2), new = 1)\nmerge_kwargs_nested(kw, kw2)

source


# PromptingTools.Experimental.RAGTools.pack_bitsMethod.
julia
pack_bits(arr::AbstractMatrix{<:Bool}) -> Matrix{UInt64}\npack_bits(vect::AbstractVector{<:Bool}) -> Vector{UInt64}

Pack a matrix or vector of boolean values into a more compact representation using UInt64.

Arguments (Input)

Returns

Examples

For vectors:

julia
bin = rand(Bool, 128)\nbinint = pack_bits(bin)\nbinx = unpack_bits(binint)\n@assert bin == binx

For matrices:

julia
bin = rand(Bool, 128, 10)\nbinint = pack_bits(bin)\nbinx = unpack_bits(binint)\n@assert bin == binx

source


# PromptingTools.Experimental.RAGTools.permutation_step!Method.
julia
permutation_step!(\n    result::RankGPTResult; rank_start::Integer = 1, rank_end::Integer = 100, kwargs...)

One sub-step of the RankGPT algorithm: permutation ranking within the window of chunks defined by the rank_start and rank_end positions.

source


# PromptingTools.Experimental.RAGTools.preprocess_tokensFunction.
julia
preprocess_tokens(text::AbstractString, stemmer=nothing; stopwords::Union{Nothing,Set{String}}=nothing, min_length::Int=3)

Preprocess provided text by removing numbers, punctuation, and applying stemming for BM25 search index.

Returns a list of preprocessed tokens.

Example

julia
stemmer = Snowball.Stemmer("english")\nstopwords = Set(["a", "an", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "no", "not", "of", "on", "or", "such", "some", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"])\ntext = "This is a sample paragraph to test the functionality of your text preprocessor. It contains a mix of uppercase and lowercase letters, as well as punctuation marks such as commas, periods, and exclamation points! Let's see how your preprocessor handles quotes, like "this one", and also apostrophes, like in don't. Will it preserve the formatting of this paragraph, including the indentation and line breaks?"\npreprocess_tokens(text, stemmer; stopwords)

source


# PromptingTools.Experimental.RAGTools.print_htmlMethod.
julia
print_html([io::IO,] parent_node::AbstractAnnotatedNode)\n\nprint_html([io::IO,] rag::AbstractRAGResult; add_sources::Bool = false,\n    add_scores::Bool = false, default_styler = HTMLStyler(),\n    low_styler = HTMLStyler(styles = "color:magenta", classes = ""),\n    medium_styler = HTMLStyler(styles = "color:blue", classes = ""),\n    high_styler = HTMLStyler(styles = "", classes = ""), styler_kwargs...)

Pretty-prints the annotation parent_node (or RAGResult) to the io stream (or returns the string) in HTML format (assumes node is styled with styler HTMLStyler).

It wraps each "token" into a span with requested styling (HTMLStyler's properties classes and styles). It also replaces new lines with <br> for better HTML formatting.

For any non-HTML styler, it prints the content as plain text.

Returns

See also HTMLStyler, annotate_support, and set_node_style! for how the styling is applied and what the arguments mean.

Examples

Note: RT is an alias for PromptingTools.Experimental.RAGTools

Simple start directly with the RAGResult:

julia
# set up the text/RAGResult\ncontext = [\n    "This is a test context.", "Another context sentence.", "Final piece of context."]\nanswer = "This is a test answer. It has multiple sentences."\nrag = RT.RAGResult(; context, final_answer=answer, question="")\n\n# print the HTML\nprint_html(rag)

Low-level control by creating our AnnotatedNode:

julia
# prepare your HTML styling\nstyler_kwargs = (;\n    default_styler=RT.HTMLStyler(),\n    low_styler=RT.HTMLStyler(styles="color:magenta", classes=""),\n    medium_styler=RT.HTMLStyler(styles="color:blue", classes=""),\n    high_styler=RT.HTMLStyler(styles="", classes=""))\n\n# annotate the text\ncontext = [\n    "This is a test context.", "Another context sentence.", "Final piece of context."]\nanswer = "This is a test answer. It has multiple sentences."\n\nparent_node = RT.annotate_support(\n    RT.TrigramAnnotater(), answer, context; add_sources=false, add_scores=false, styler_kwargs...)\n\n# print the HTML\nprint_html(parent_node)\n\n# or to accumulate more nodes\nio = IOBuffer()\nprint_html(io, parent_node)

source


# PromptingTools.Experimental.RAGTools.rank_gptMethod.
julia
rank_gpt(chunks::AbstractVector{<:AbstractString}, question::AbstractString;\n    verbose::Int = 1, rank_start::Integer = 1, rank_end::Integer = 100,\n    window_size::Integer = 20, step::Integer = 10,\n    num_rounds::Integer = 1, model::String = "gpt4o", kwargs...)

Ranks the chunks based on their relevance for question. Returns the ranking permutation of the chunks in the order they are most relevant to the question (the first is the most relevant).

Example

julia
result = rank_gpt(chunks, question; rank_start=1, rank_end=25, window_size=8, step=4, num_rounds=3, model="gpt4o")

Reference

[1] Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agents by W. Sun et al. [2] RankGPT Github

source


# PromptingTools.Experimental.RAGTools.rank_sliding_window!Method.
julia
rank_sliding_window!(\n    result::RankGPTResult; verbose::Int = 1, rank_start = 1, rank_end = 100,\n    window_size = 20, step = 10, model::String = "gpt4o", kwargs...)

A single pass of the RankGPT algorithm: permutation ranking across all positions between rank_start and rank_end.

source


# PromptingTools.Experimental.RAGTools.receive_permutation!Method.
julia
receive_permutation!(\n    curr_rank::AbstractVector{<:Integer}, response::AbstractString;\n    rank_start::Integer = 1, rank_end::Integer = 100)

Extracts and heals the permutation to contain all ranking positions.

source


# PromptingTools.Experimental.RAGTools.reciprocal_rank_fusionMethod.
julia
reciprocal_rank_fusion(args...; k::Int=60)

Merges multiple rankings and calculates the reciprocal rank score for each chunk (discounted by the inverse of the rank).

Example

julia
positions1 = [1, 3, 5, 7, 9]\npositions2 = [2, 4, 6, 8, 10]\npositions3 = [2, 4, 6, 11, 12]\n\nmerged_positions, scores = reciprocal_rank_fusion(positions1, positions2, positions3)

source


# PromptingTools.Experimental.RAGTools.reciprocal_rank_fusionMethod.
julia
reciprocal_rank_fusion(\n    positions1::AbstractVector{<:Integer}, scores1::AbstractVector{<:T},\n    positions2::AbstractVector{<:Integer},\n    scores2::AbstractVector{<:T}; k::Int = 60) where {T <: Real}

Merges two sets of rankings and their joint scores. Calculates the reciprocal rank score for each chunk (discounted by the inverse of the rank).

Example

julia
positions1 = [1, 3, 5, 7, 9]\nscores1 = [0.9, 0.8, 0.7, 0.6, 0.5]\npositions2 = [2, 4, 6, 8, 10]\nscores2 = [0.5, 0.6, 0.7, 0.8, 0.9]\n\nmerged, scores = reciprocal_rank_fusion(positions1, scores1, positions2, scores2; k = 60)

source


# PromptingTools.Experimental.RAGTools.refine!Method.
julia
refine!(\n    refiner::NoRefiner, index::AbstractChunkIndex, result::AbstractRAGResult;\n    kwargs...)

Simple no-op function for refine!. It simply copies the result.answer and result.conversations[:answer] without any changes.

source


# PromptingTools.Experimental.RAGTools.refine!Method.
julia
refine!(\n    refiner::SimpleRefiner, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_CHAT,\n    template::Symbol = :RAGAnswerRefiner,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Gives the model a chance to refine the answer (using the same or a different context than previously provided).

This method uses the same context as the original answer; however, it can be modified to do additional retrieval and use a different context.

Returns

Arguments
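
Example

A hedged sketch of enabling this refiner in a full RAG pipeline, mirroring the TavilySearchRefiner example below (the index and question are assumed to exist):

julia
# assumes RAGTools names are in scope, eg, via: using PromptingTools.Experimental.RAGTools
cfg = RAGConfig()
cfg.generator.refiner = SimpleRefiner()   # refine every answer with a follow-up LLM call

result = airag(cfg, index; question, return_all = true)
pprint(result)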

source


# PromptingTools.Experimental.RAGTools.refine!Method.
julia
refine!(\n    refiner::TavilySearchRefiner, index::AbstractDocumentIndex, result::AbstractRAGResult;\n    verbose::Bool = true,\n    model::AbstractString = PT.MODEL_CHAT,\n    include_answer::Bool = true,\n    max_results::Integer = 5,\n    include_domains::AbstractVector{<:AbstractString} = String[],\n    exclude_domains::AbstractVector{<:AbstractString} = String[],\n    template::Symbol = :RAGWebSearchRefiner,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Refines the answer by executing a web search using the Tavily API. This method aims to enhance the answer's accuracy and relevance by incorporating information retrieved from the web.

Note: The web results and web answer (if requested) will be added to the context and sources!

Returns

Arguments

Example

julia
refine!(TavilySearchRefiner(), index, result)\n# See result.final_answer or pprint(result)

To enable this refiner in a full RAG pipeline, simply swap the component in the config:

julia
cfg = RT.RAGConfig()\ncfg.generator.refiner = RT.TavilySearchRefiner()\n\nresult = airag(cfg, index; question, return_all = true)\npprint(result)

source


# PromptingTools.Experimental.RAGTools.rephraseMethod.
julia
rephrase(rephraser::SimpleRephraser, question::AbstractString;\n    verbose::Bool = true,\n    model::String = PT.MODEL_CHAT, template::Symbol = :RAGQueryHyDE,\n    cost_tracker = Threads.Atomic{Float64}(0.0))

Rephrases the question using the provided rephraser (template = :RAGQueryHyDE).

Special flavor of rephrasing using HyDE (Hypothetical Document Embedding) method, which aims to find the documents most similar to a synthetic passage that would be a good answer to our question.

Returns both the original and the rephrased question.

Arguments
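
Example

A hedged sketch (requires a valid API key for the chat model; the question is made up and the returned vector layout - original first, rephrased last - is an assumption):

julia
# assumes RAGTools names are in scope, eg, via: using PromptingTools.Experimental.RAGTools
question = "What is the best way to parallelize code in Julia?"
rephrased = rephrase(SimpleRephraser(), question; template = :RAGQueryHyDE)
# rephrased[1] is the original question, rephrased[end] the hypothetical passage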

source


# PromptingTools.Experimental.RAGTools.rephraseMethod.
julia
rephrase(rephraser::NoRephraser, question::AbstractString; kwargs...)

No-op, simple passthrough.

source


# PromptingTools.Experimental.RAGTools.rephraseMethod.
julia
rephrase(rephraser::SimpleRephraser, question::AbstractString;\n    verbose::Bool = true,\n    model::String = PT.MODEL_CHAT, template::Symbol = :RAGQueryOptimizer,\n    cost_tracker = Threads.Atomic{Float64}(0.0), kwargs...)

Rephrases the question using the provided rephraser template.

Returns both the original and the rephrased question.

Arguments

source


# PromptingTools.Experimental.RAGTools.rerankMethod.
julia
rerank(\n    reranker::CohereReranker, index::AbstractDocumentIndex, question::AbstractString,\n    candidates::AbstractCandidateChunks;\n    verbose::Bool = false,\n    api_key::AbstractString = PT.COHERE_API_KEY,\n    top_n::Integer = length(candidates.scores),\n    model::AbstractString = "rerank-english-v3.0",\n    return_documents::Bool = false,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Re-ranks a list of candidate chunks using the Cohere Rerank API. See https://cohere.com/rerank for more details.

Arguments
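
Example

A hedged sketch of swapping this reranker into a full pipeline (requires COHERE_API_KEY; index and question are assumed to exist):

julia
# assumes RAGTools names are in scope, eg, via: using PromptingTools.Experimental.RAGTools
cfg = RAGConfig(; retriever = AdvancedRetriever(; reranker = CohereReranker()))
result = airag(cfg, index; question, return_all = true)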

source


# PromptingTools.Experimental.RAGTools.rerankMethod.
julia
rerank(\n    reranker::RankGPTReranker, index::AbstractDocumentIndex, question::AbstractString,\n    candidates::AbstractCandidateChunks;\n    api_key::AbstractString = PT.OPENAI_API_KEY,\n    model::AbstractString = PT.MODEL_CHAT,\n    verbose::Bool = false,\n    top_n::Integer = length(candidates.scores),\n    unique_chunks::Bool = true,\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Re-ranks a list of candidate chunks using the RankGPT algorithm. See https://github.com/sunnweiwei/RankGPT for more details.

It uses LLM calls to rank the candidate chunks.

Arguments

Examples

julia
index = <some index>\nquestion = "What are the best practices for parallel computing in Julia?"\n\ncfg = RAGConfig(; retriever = SimpleRetriever(; reranker = RT.RankGPTReranker()))\nmsg = airag(cfg, index; question, return_all = true)

To get full verbosity of logs, set verbose = 5 (anything higher than 3).

julia
msg = airag(cfg, index; question, return_all = true, verbose = 5)

Reference

[1] Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agents by W. Sun et al. [2] RankGPT Github

source


# PromptingTools.Experimental.RAGTools.retrieveMethod.
julia
retrieve(retriever::AbstractRetriever,\n    index::AbstractChunkIndex,\n    question::AbstractString;\n    verbose::Integer = 1,\n    top_k::Integer = 100,\n    top_n::Integer = 5,\n    api_kwargs::NamedTuple = NamedTuple(),\n    rephraser::AbstractRephraser = retriever.rephraser,\n    rephraser_kwargs::NamedTuple = NamedTuple(),\n    embedder::AbstractEmbedder = retriever.embedder,\n    embedder_kwargs::NamedTuple = NamedTuple(),\n    processor::AbstractProcessor = retriever.processor,\n    processor_kwargs::NamedTuple = NamedTuple(),\n    finder::AbstractSimilarityFinder = retriever.finder,\n    finder_kwargs::NamedTuple = NamedTuple(),\n    tagger::AbstractTagger = retriever.tagger,\n    tagger_kwargs::NamedTuple = NamedTuple(),\n    filter::AbstractTagFilter = retriever.filter,\n    filter_kwargs::NamedTuple = NamedTuple(),\n    reranker::AbstractReranker = retriever.reranker,\n    reranker_kwargs::NamedTuple = NamedTuple(),\n    cost_tracker = Threads.Atomic{Float64}(0.0),\n    kwargs...)

Retrieves the most relevant chunks from the index for the given question and returns them in the RAGResult object.

This is the main entry point for the retrieval stage of the RAG pipeline. It is often followed by the generate! step.

Notes:

The arguments correspond to the steps of the retrieval process (rephrasing, embedding, finding similar docs, tagging, filtering by tags, reranking). You can customize each step by providing a new custom type that dispatches the corresponding function, eg, create your own type struct MyReranker<:AbstractReranker end and define the custom method for it rerank(::MyReranker,...) = ....

Note: Discover available retrieval sub-types for each step with subtypes(AbstractRephraser) and similar for other abstract types.

If you're using locally-hosted models, you can pass the api_kwargs with the url field set to the model's URL and make sure to provide corresponding model kwargs to rephraser, embedder, and tagger to use the custom models (they make AI calls).

Arguments

See also: SimpleRetriever, AdvancedRetriever, build_index, rephrase, get_embeddings, get_keywords, find_closest, get_tags, find_tags, rerank, RAGResult.

Examples

Find the 5 most relevant chunks from the index for the given question.

julia
# assumes you have an existing index `index`\nretriever = SimpleRetriever()\n\nresult = retrieve(retriever,\n    index,\n    "What is the capital of France?",\n    top_n = 5)\n\n# or use the default retriever (same as above)\nresult = retrieve(index,\n    "What is the capital of France?",\n    top_n = 5)

Apply more advanced retrieval with question rephrasing and reranking (requires COHERE_API_KEY). We will obtain top 100 chunks from embeddings (top_k) and top 5 chunks from reranking (top_n).

julia
retriever = AdvancedRetriever()\n\nresult = retrieve(retriever, index, question; top_k=100, top_n=5)

You can use the retriever to customize your retrieval strategy or directly change the strategy types in the retrieve kwargs!

Example of using locally-hosted model hosted on localhost:8080:

julia
retriever = SimpleRetriever()\nresult = retrieve(retriever, index, question;\n    rephraser_kwargs = (; model = "custom"),\n    embedder_kwargs = (; model = "custom"),\n    tagger_kwargs = (; model = "custom"), api_kwargs = (;\n        url = "http://localhost:8080"))

source


# PromptingTools.Experimental.RAGTools.run_qa_evalsMethod.
julia
run_qa_evals(index::AbstractChunkIndex, qa_items::AbstractVector{<:QAEvalItem};\n    api_kwargs::NamedTuple = NamedTuple(),\n    airag_kwargs::NamedTuple = NamedTuple(),\n    qa_evals_kwargs::NamedTuple = NamedTuple(),\n    verbose::Bool = true, parameters_dict::Dict{Symbol, <:Any} = Dict{Symbol, Any}())

Evaluates a vector of QAEvalItems and returns a vector QAEvalResult. This function assesses the relevance and accuracy of the answers generated in a QA evaluation context.

See ?run_qa_evals for more details.

Arguments

Returns

Vector{QAEvalResult}: Vector of evaluation results that includes various scores and metadata related to the QA evaluation.

Example

julia
index = "..." # Assuming a proper index is defined\nqa_items = [QAEvalItem(question="What is the capital of France?", answer="Paris", context="France is a country in Europe."),\n            QAEvalItem(question="What is the capital of Germany?", answer="Berlin", context="Germany is a country in Europe.")]\n\n# Let's run a test with `top_k=5`\nresults = run_qa_evals(index, qa_items; airag_kwargs=(;top_k=5), parameters_dict=Dict(:top_k => 5))\n\n# Filter out the "failed" calls\nresults = filter(x->!isnothing(x.answer_score), results);\n\n# See average judge score\nmean(x->x.answer_score, results)

source


# PromptingTools.Experimental.RAGTools.run_qa_evalsMethod.
julia
run_qa_evals(qa_item::QAEvalItem, ctx::RAGResult; verbose::Bool = true,\n             parameters_dict::Dict{Symbol, <:Any}, judge_template::Symbol = :RAGJudgeAnswerFromContext,\n             model_judge::AbstractString, api_kwargs::NamedTuple = NamedTuple()) -> QAEvalResult

Evaluates a single QAEvalItem using RAG details (RAGResult) and returns a QAEvalResult structure. This function assesses the relevance and accuracy of the answers generated in a QA evaluation context.

Arguments

Returns

QAEvalResult: An evaluation result that includes various scores and metadata related to the QA evaluation.

Notes

Examples

Evaluating a QA pair using a specific context and model:

julia
qa_item = QAEvalItem(question="What is the capital of France?", answer="Paris", context="France is a country in Europe.")\nctx = RAGResult(source="Wikipedia", context="France is a country in Europe.", answer="Paris")\nparameters_dict = Dict("param1" => "value1", "param2" => "value2")\n\neval_result = run_qa_evals(qa_item, ctx, parameters_dict=parameters_dict, model_judge="MyAIJudgeModel")

source


# PromptingTools.Experimental.RAGTools.score_retrieval_hitMethod.

Returns 1.0 if context overlaps or is contained within any of the candidate_context

source


# PromptingTools.Experimental.RAGTools.score_retrieval_rankMethod.

Returns Integer rank of the position where context overlaps or is contained within a candidate_context

source


# PromptingTools.Experimental.RAGTools.score_to_unit_scaleMethod.
julia
score_to_unit_scale(x::AbstractVector{T}) where T<:Real

Shift and scale a vector of scores to the unit scale [0, 1].

Example

julia
x = [1.0, 2.0, 3.0, 4.0, 5.0]\nscaled_x = score_to_unit_scale(x)

source


# PromptingTools.Experimental.RAGTools.set_node_style!Method.
julia
set_node_style!(::TrigramAnnotater, node::AnnotatedNode;\n    low_threshold::Float64 = 0.0, medium_threshold::Float64 = 0.5, high_threshold::Float64 = 1.0,\n    default_styler::AbstractAnnotationStyler = Styler(),\n    low_styler::AbstractAnnotationStyler = Styler(color = :magenta, bold = false),\n    medium_styler::AbstractAnnotationStyler = Styler(color = :blue, bold = false),\n    high_styler::AbstractAnnotationStyler = Styler(color = :nothing, bold = false),\n    bold_multihits::Bool = false)

Sets style of node based on the provided rules

source


# PromptingTools.Experimental.RAGTools.setpropertynestedMethod.
julia
setpropertynested(nt::NamedTuple, parent_keys::Vector{Symbol},\n    key::Symbol,\n    value

)

Setter for a property key in a nested NamedTuple nt, where the property is nested to a key in parent_keys.

Useful for nested kwargs where we want to change some property in parent_keys subset (eg, model in retriever_kwargs).

Examples

julia
kw = (; abc = (; def = "x"))\nsetpropertynested(kw, [:abc], :def, "y")\n# Output: (abc = (def = "y",),)

Practical example of changing all model keys in CHAT-based steps in the pipeline:

julia
# changes :model to "gpt4t" whenever the parent key is in the below list (chat-based steps)\nsetpropertynested(kwargs,\n    [:rephraser_kwargs, :tagger_kwargs, :answerer_kwargs, :refiner_kwargs],\n    :model, "gpt4t")

Or changing an embedding model (across both indexer and retriever steps, because it's same step name):

julia
kwargs = setpropertynested(\n        kwargs, [:embedder_kwargs],\n        :model, "text-embedding-3-large"\n    )

source


# PromptingTools.Experimental.RAGTools.split_into_code_and_sentencesMethod.
julia
split_into_code_and_sentences(input::Union{String, SubString{String}})

Splits text block into code or text and sub-splits into units.

If it is a code block, it splits by newline but keeps the group_id the same (to have the same source). If it is a text block, it splits into sentences, bullets, etc., and provides different group_ids (to have different sources).
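
Example

A hedged sketch (the snippet is made up; the return pair of sentences and group ids is an assumption based on the description above):

julia
# assumes RAGTools names are in scope, eg, via: using PromptingTools.Experimental.RAGTools
snippet = "Greet the user. ```julia\nhello() = println(\"hi\")\n```"
sentences, group_ids = split_into_code_and_sentences(snippet)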

source


# PromptingTools.Experimental.RAGTools.tags_extractMethod.
julia
tags_extract(item::Tag)\ntags_extract(tags::Vector{Tag})

Extracts the Tag item into a string of the form category:::value (lowercased and spaces replaced with underscores).

Example

julia
msg = aiextract(:RAGExtractMetadataShort; return_type=MaybeTags, text="I like package DataFrames", instructions="None.")\nmetadata = tags_extract(msg.content.items)

source


# PromptingTools.Experimental.RAGTools.token_with_boundariesMethod.
julia
token_with_boundaries(\n    prev_token::Union{Nothing, AbstractString}, curr_token::AbstractString,\n    next_token::Union{Nothing, AbstractString})

Joins the three tokens together. Useful to add boundary tokens (like spaces vs brackets) to the curr_token to improve the matched context (ie, separate partial matches from exact match)

source


# PromptingTools.Experimental.RAGTools.tokenizeMethod.
julia
tokenize(input::Union{String, SubString{String}})

Tokenizes provided input by spaces, special characters or Julia symbols (eg, =>).

Unlike other tokenizers, it aims to be lossless - ie, to keep both the separated text and the separators.
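
Example

An illustrative sketch (the exact token vector is approximate):

julia
# assumes RAGTools names are in scope, eg, via: using PromptingTools.Experimental.RAGTools
tokenize("x => y is a Pair")
# eg, ["x", " ", "=>", " ", "y", " ", "is", " ", "a", " ", "Pair"]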

source


# PromptingTools.Experimental.RAGTools.translate_positions_to_parentMethod.
julia
translate_positions_to_parent(index::AbstractChunkIndex, positions::AbstractVector{<:Integer})

Translate positions to the parent index. Useful to convert between positions in a view and the original index.

Used whenever a chunkdata() is used to re-align positions in case index is a view.

source


# PromptingTools.Experimental.RAGTools.translate_positions_to_parentMethod.
julia
translate_positions_to_parent(\n    index::SubChunkIndex, pos::AbstractVector{<:Integer})

Translate positions to the parent index. Useful to convert between positions in a view and the original index.

Used whenever a chunkdata() or tags() are used to re-align positions to the "parent" index.

source


# PromptingTools.Experimental.RAGTools.trigram_support!Method.
julia
trigram_support!(parent_node::AnnotatedNode,\n    context_trigrams::AbstractVector, trigram_func::F1 = trigrams, token_transform::F2 = identity;\n    skip_trigrams::Bool = false, min_score::Float64 = 0.5,\n    min_source_score::Float64 = 0.25,\n    stop_words::AbstractVector{<:String} = STOPWORDS,\n    styler_kwargs...) where {F1 <: Function, F2 <: Function}

Find if the parent_node.content is supported by the provided context_trigrams.

Logic:

For diagnostics, you can use AbstractTrees.print_tree(parent_node) to see the tree structure of each token and its score.

Example

julia
node = AnnotatedNode(content = "xyz")\ntrigram_support!(node, context_trigrams) # updates node.children!

source


# PromptingTools.Experimental.RAGTools.trigramsMethod.
julia
trigrams(input_string::AbstractString; add_word::AbstractString = "")

Splits provided input_string into a vector of trigrams (combination of three consecutive characters found in the input_string).

If add_word is provided, it is added to the resulting array. Useful to add the full word itself to the resulting array for exact match.
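
Example

An illustrative sketch (the exact element type and ordering are approximate):

julia
# assumes RAGTools names are in scope, eg, via: using PromptingTools.Experimental.RAGTools
trigrams("hello")                      # eg, ["hel", "ell", "llo"]
trigrams("hello"; add_word = "hello")  # eg, ["hel", "ell", "llo", "hello"]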

source


# PromptingTools.Experimental.RAGTools.trigrams_hashedMethod.
julia
trigrams_hashed(input_string::AbstractString; add_word::AbstractString = "")

Splits provided input_string into a Set of hashed trigrams (combination of three consecutive characters found in the input_string).

It is more efficient for lookups in large strings (eg, >100K characters).

If add_word is provided, it is hashed and added to the resulting set. Useful for adding the hash of the full word itself to enable exact matches.

source


# PromptingTools.last_messageMethod.
julia
PT.last_message(result::RAGResult)

Extract the last message from the RAGResult. It looks for final_answer first, then answer fields in the conversations dictionary. Returns nothing if not found.

source


# PromptingTools.last_outputMethod.

Extracts the last output (generated text answer) from the RAGResult.

source


# PromptingTools.pprintMethod.
julia
PromptingTools.pprint(\n    io::IO, node::AbstractAnnotatedNode;\n    text_width::Int = displaysize(io)[2], add_newline::Bool = true)

Pretty print the node to the io stream, including all its children

Supports only node.style::Styler for now.

source


# PromptingTools.pprintMethod.
julia
PT.pprint(\n    io::IO, r::AbstractRAGResult; add_context::Bool = false,\n    text_width::Int = displaysize(io)[2], annotater_kwargs...)

Pretty print the RAG result r to the given io stream.

If add_context is true, the context will be printed as well. The text_width parameter can be used to control the width of the output.

You can provide additional keyword arguments to the annotater, eg, add_sources, add_scores, min_score, etc. See annotate_support for more details.
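
Example

A hedged sketch (cfg, index and question are assumed to exist from an earlier airag setup):

julia
# assumes: using PromptingTools and its Experimental.RAGTools module
result = airag(cfg, index; question, return_all = true)
pprint(result; add_context = true, add_scores = true)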

source


', 284) ])); } const reference_ragtools = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render]]); diff --git a/previews/PR218/coverage_of_model_providers.html b/previews/PR218/coverage_of_model_providers.html index c0557d736..7cf78359e 100644 --- a/previews/PR218/coverage_of_model_providers.html +++ b/previews/PR218/coverage_of_model_providers.html @@ -8,9 +8,9 @@ - + - + @@ -18,7 +18,7 @@
Skip to content

Coverage of Model Providers

PromptingTools.jl routes AI calls through the use of subtypes of AbstractPromptSchema, which determine how data is formatted and where it is sent. (For example, OpenAI models have the corresponding subtype AbstractOpenAISchema, having the corresponding schemas - OpenAISchema, CustomOpenAISchema, etc.) This ensures that the data is correctly formatted for the specific AI model provider.

Below is an overview of the model providers supported by PromptingTools.jl, along with the corresponding schema information.

Abstract Schema | Schema | Model Provider | aigenerate | aiembed | aiextract | aiscan | aiimage | aiclassify
AbstractOpenAISchema | OpenAISchema | OpenAI
AbstractOpenAISchema | CustomOpenAISchema* | Any OpenAI-compatible API (eg, vLLM)*
AbstractOpenAISchema | LocalServerOpenAISchema** | Any OpenAI-compatible Local server**
AbstractOpenAISchema | MistralOpenAISchema | Mistral AI
AbstractOpenAISchema | DatabricksOpenAISchema | Databricks
AbstractOpenAISchema | FireworksOpenAISchema | Fireworks AI
AbstractOpenAISchema | TogetherOpenAISchema | Together AI
AbstractOpenAISchema | GroqOpenAISchema | Groq
AbstractOllamaSchema | OllamaSchema | Ollama (endpoint api/chat)
AbstractManagedSchema | AbstractOllamaManagedSchema | Ollama (endpoint api/generate)
AbstractAnthropicSchema | AnthropicSchema | Anthropic
AbstractGoogleSchema | GoogleSchema | Google Gemini
* Catch-all implementation - Requires providing a url with api_kwargs and a corresponding API key.

** This schema is a flavor of CustomOpenAISchema with a url key preset by global preference key LOCAL_SERVER. It is specifically designed for seamless integration with Llama.jl and utilizes an ENV variable for the URL, making integration easier in certain workflows, such as when nested calls are involved and passing api_kwargs is more challenging.
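
For the catch-all CustomOpenAISchema, a sketch of providing the url via api_kwargs (mirroring the pattern shown in the examples elsewhere in these docs; the url, model name, and API key below are placeholders for your own server):

julia
using PromptingTools
const PT = PromptingTools

msg = aigenerate(PT.CustomOpenAISchema(), "Say hi!";
    model = "my_model", api_key = "...", api_kwargs = (; url = "http://localhost:8081"))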

Note 1: aitools has identical support as aiextract for all providers, as it has the same API requirements.

Note 2: The aiscan and aiimage functions rely on specific endpoints being implemented by the provider. Ensure that the provider you choose supports these functionalities.

For more detailed explanations of the functions and schema information, refer to How It Works.

- + \ No newline at end of file diff --git a/previews/PR218/examples/building_RAG.html b/previews/PR218/examples/building_RAG.html index b5fe79649..611832642 100644 --- a/previews/PR218/examples/building_RAG.html +++ b/previews/PR218/examples/building_RAG.html @@ -8,9 +8,9 @@ - + - + @@ -79,7 +79,7 @@ results = filter(x->!isnothing(x.answer_score), results);

Note: You could also use the vectorized version results = run_qa_evals(index, evals) to evaluate all items at once.

julia

 # Let's take a simple average to calculate our score
 @info "RAG Evals: $(length(results)) results, Avg. score: $(round(mean(x->x.answer_score, results);digits=1)), Retrieval score: $(100*round(Int,mean(x->x.retrieval_score,results)))%"
[ Info: RAG Evals: 10 results, Avg. score: 4.6, Retrieval score: 100%

Note: The retrieval score is 100% only because we have two small documents and running on 10 items only. In practice, you would have a much larger document set and a much larger eval set, which would result in a more representative retrieval score.

You can also analyze the results in a DataFrame:

julia
df = DataFrame(results)
10×8 DataFrame
Row | source | context | question | answer | retrieval_score | retrieval_rank | answer_score | parameters
String | String | String | SubStrin… | Float64 | Int64 | Float64 | Dict…
1examples/data/database_style_joins.txtDatabase-Style Joins\nIntroduction to joins\nWe often need to combine two or more data sets together to provide a complete picture of the topic we are studying. For example, suppose that we have the following two data sets:\n\njulia> using DataFramesWhat is the purpose of joining two or more data sets together?The purpose of joining two or more data sets together is to combine the data sets based on a common key and provide a complete picture of the topic being studied.1.015.0Dict(:top_k=>3)
2examples/data/database_style_joins.txtjulia> people = DataFrame(ID=[20, 40], Name=["John Doe", "Jane Doe"])\n2×2 DataFrame\n Row │ ID Name\n │ Int64 String\n─────┼─────────────────\n 1 │ 20 John Doe\n 2 │ 40 Jane DoeWhat is the DataFrame called 'people' composed of?The DataFrame called 'people' consists of two columns: 'ID' and 'Name'. The 'ID' column contains integers, and the 'Name' column contains strings.1.014.0Dict(:top_k=>3)
3examples/data/database_style_joins.txtjulia> jobs = DataFrame(ID=[20, 40], Job=["Lawyer", "Doctor"])\n2×2 DataFrame\n Row │ ID Job\n │ Int64 String\n─────┼───────────────\n 1 │ 20 Lawyer\n 2 │ 40 DoctorWhat are the jobs and IDs listed in the dataframe?The jobs and IDs listed in the dataframe are as follows:\n\nID: 20\nJob: Lawyer\n\nID: 40\nJob: Doctor1.014.67Dict(:top_k=>3)
4examples/data/database_style_joins.txtWe might want to work with a larger data set that contains both the names and jobs for each ID. We can do this using the innerjoin function:How can we combine the names and jobs for each ID in a larger data set?We can use the `innerjoin` function to combine the names and jobs for each ID in a larger data set.1.014.33333Dict(:top_k=>3)
5examples/data/database_style_joins.txtjulia> innerjoin(people, jobs, on = :ID)\n2×3 DataFrame\n Row │ ID Name Job\n │ Int64 String String\n─────┼─────────────────────────\n 1 │ 20 John Doe Lawyer\n 2 │ 40 Jane Doe DoctorWhat is the name of the person with the ID 40 and their job?The name of the person with the ID 40 is Jane Doe and their job is Doctor.1.024.67Dict(:top_k=>3)
6examples/data/database_style_joins.txtIn relational database theory, this operation is generally referred to as a join. The columns used to determine which rows should be combined during a join are called keys.\n\nThe following functions are provided to perform seven kinds of joins:What are the different kinds of joins?The different kinds of joins are:\n\n1. Inner Join: Returns only the rows that have matching values in both data frames.\n2. Left Join: Returns all rows from the left data frame and the matching rows from the right data frame.\n3. Right Join: Returns all rows from the right data frame and the matching rows from the left data frame.\n4. Full Outer Join: Returns all rows from both data frames and fills in missing values with null.\n5. Cross Join: Returns the cartesian product of the rows from both data frames.\n6. Semi Join: Returns only the rows from the left data frame that have matching values in the right data frame.\n7. Anti Join: Returns only the rows from the left data frame that do not have matching values in the right data frame.1.014.66667Dict(:top_k=>3)
7examples/data/database_style_joins.txtinnerjoin: the output contains rows for values of the key that exist in all passed data frames.What does the output of the inner join operation contain?The output of the inner join operation contains only the rows for values of the key that exist in all passed data frames.1.015.0Dict(:top_k=>3)
8examples/data/database_style_joins.txtleftjoin: the output contains rows for values of the key that exist in the first (left) argument, whether or not that value exists in the second (right) argument.What is the purpose of the left join operation?The purpose of the left join operation is to combine data from two tables based on a common key, where all rows from the left (first) table are included in the output, regardless of whether there is a match in the right (second) table.1.014.66667Dict(:top_k=>3)
9examples/data/database_style_joins.txtrightjoin: the output contains rows for values of the key that exist in the second (right) argument, whether or not that value exists in the first (left) argument.What is the purpose of the right join operation?The purpose of the right join operation is to include all the rows from the second (right) argument, regardless of whether a match is found in the first (left) argument.1.014.67Dict(:top_k=>3)
10examples/data/database_style_joins.txtouterjoin: the output contains rows for values of the key that exist in any of the passed data frames.\nsemijoin: Like an inner join, but output is restricted to columns from the first (left) argument.What is the difference between outer join and semi join?The difference between outer join and semi join is that outer join includes rows for values of the key that exist in any of the passed data frames, whereas semi join is like an inner join but only outputs columns from the first argument.1.014.66667Dict(:top_k=>3)

We're done for today!

What would we do next?

... and much more! See some ideas in Anyscale RAG tutorial


This page was generated using Literate.jl.

- + \ No newline at end of file diff --git a/previews/PR218/examples/readme_examples.html b/previews/PR218/examples/readme_examples.html index 417bbf692..bee931d5b 100644 --- a/previews/PR218/examples/readme_examples.html +++ b/previews/PR218/examples/readme_examples.html @@ -8,9 +8,9 @@ - + - + @@ -140,7 +140,7 @@ api_key = "..." prompt = "Say hi!" msg = aigenerate(PT.CustomOpenAISchema(), prompt; model="my_model", api_key, api_kwargs=(; url="http://localhost:8081"))

As you can see, it also works for any local models that you might have running on your computer!

Note: At the moment, we only support aigenerate and aiembed functions for MistralAI and other OpenAI-compatible APIs. We plan to extend the support in the future.

- + \ No newline at end of file diff --git a/previews/PR218/examples/working_with_aitemplates.html b/previews/PR218/examples/working_with_aitemplates.html index da00b4993..48ec9b86f 100644 --- a/previews/PR218/examples/working_with_aitemplates.html +++ b/previews/PR218/examples/working_with_aitemplates.html @@ -8,9 +8,9 @@ - + - + @@ -60,7 +60,7 @@ tpl; description = "For asking data analysis questions in Julia language. Placeholders: `ask`") rm(filename) # cleanup if we don't like it

When you create a new template, remember to re-load the templates with load_templates!() so that it's available for use.

julia
PT.load_templates!();

!!! If you have some good templates (or suggestions for the existing ones), please consider sharing them with the community by opening a PR to the templates directory!


This page was generated using Literate.jl.

- + \ No newline at end of file diff --git a/previews/PR218/examples/working_with_custom_apis.html b/previews/PR218/examples/working_with_custom_apis.html index 328f6fc65..2a6209667 100644 --- a/previews/PR218/examples/working_with_custom_apis.html +++ b/previews/PR218/examples/working_with_custom_apis.html @@ -8,9 +8,9 @@ - + - + @@ -54,7 +54,7 @@ msg = aiextract(prompt; return_type=Food, model="firefunction") msg.content # Output: Food("apple", ["delicious", "juicy"])

For embedding a text, use aiembed:

julia
aiembed(PT.FireworksOpenAISchema(), "embed me"; model="nomic-ai/nomic-embed-text-v1.5")

Note: You can register the model with PT.register_model! and use it as usual.

- + \ No newline at end of file diff --git a/previews/PR218/examples/working_with_google_ai_studio.html b/previews/PR218/examples/working_with_google_ai_studio.html index 2fe1132f7..1074edd04 100644 --- a/previews/PR218/examples/working_with_google_ai_studio.html +++ b/previews/PR218/examples/working_with_google_ai_studio.html @@ -8,9 +8,9 @@ - + - + @@ -28,7 +28,7 @@ The Force flows through all living things, not machines. Seek balance in the Force, and your heart will find true connection. Remember, the path of the Jedi is to serve others, not to be attached to possessions.")

Gotchas

- + \ No newline at end of file diff --git a/previews/PR218/examples/working_with_ollama.html b/previews/PR218/examples/working_with_ollama.html index 0a7d3de8a..cf2caf4af 100644 --- a/previews/PR218/examples/working_with_ollama.html +++ b/previews/PR218/examples/working_with_ollama.html @@ -8,9 +8,9 @@ - + - + @@ -57,7 +57,7 @@ model = "openhermes2.5-mistral")
PromptingTools.DataMessage(Matrix{Float64} of size (4096, 2))

Cosine similarity is then a simple multiplication

julia
msg.content' * msg.content[:, 1]
2-element Vector{Float64}:
  0.9999999999999982
  0.40796033843072876

This page was generated using Literate.jl.

- + \ No newline at end of file diff --git a/previews/PR218/extra_tools/agent_tools_intro.html b/previews/PR218/extra_tools/agent_tools_intro.html index 443153f25..3edcd8eb9 100644 --- a/previews/PR218/extra_tools/agent_tools_intro.html +++ b/previews/PR218/extra_tools/agent_tools_intro.html @@ -8,11 +8,11 @@ - + - + - + @@ -43,7 +43,7 @@ # Note: you could also use the do-syntax, eg, airetry!(out, "You must answer with 1 word only.") do aicall length(split(last_output(aicall), r" |\\.")) == 1 -end

You can even add the guessing itself as an airetry! condition of last_output(out) == "yellow" and provide feedback if the guess is wrong.

References

# PromptingTools.Experimental.AgentTools.AIGenerateFunction.
julia
AIGenerate(args...; kwargs...)

Creates a lazy instance of aigenerate. It is an instance of AICall with aigenerate as the function.

Use exactly the same arguments and keyword arguments as aigenerate (see ?aigenerate for details).

source


# PromptingTools.Experimental.AgentTools.AICallType.
julia
AICall(func::F, args...; kwargs...) where {F<:Function}
+end

You can even add the guessing itself as an airetry! condition of last_output(out) == "yellow" and provide feedback if the guess is wrong.

References

# PromptingTools.Experimental.AgentTools.AIGenerateFunction.
julia
AIGenerate(args...; kwargs...)

Creates a lazy instance of aigenerate. It is an instance of AICall with aigenerate as the function.

Use exactly the same arguments and keyword arguments as aigenerate (see ?aigenerate for details).

source


# PromptingTools.Experimental.AgentTools.AICallType.
julia
AICall(func::F, args...; kwargs...) where {F<:Function}
 
 AIGenerate(args...; kwargs...)
 AIEmbed(args...; kwargs...)
@@ -58,7 +58,7 @@
 aicall = AIGenerate(:JuliaExpertAsk; ask="xyz", model="abc", api_kwargs=(; temperature=0.1))

Trigger the AICall with run! (it returns the update AICall struct back):

julia
aicall |> run!
 ````
 
-You can also use `AICall` as a functor to trigger the AI call with a `UserMessage` or simply the text to send:

julia aicall(UserMessage("Hello, world!")) # Triggers the lazy call result = run!(aicall) # Explicitly runs the AI call ``` This can be used to "reply" to previous message / continue the stored conversation

Notes

  • The AICall struct is a key component in building flexible and efficient Agentic pipelines

  • The lazy evaluation model allows for setting up the call parameters in advance and deferring the actual execution until it is explicitly triggered.

  • This struct is particularly useful in scenarios where the timing of AI function execution needs to be deferred or where multiple potential calls need to be prepared and selectively executed.

source


# PromptingTools.last_outputFunction.

Extracts the last output (generated text answer) from the RAGResult.

source

Helpful accessor for AICall blocks. Returns the last output in the conversation (eg, the string/data in the last message).

source

Helpful accessor for the last generated output (msg.content) in conversation. Returns the last output in the conversation (eg, the string/data in the last message).

source


# PromptingTools.last_messageFunction.
julia
PT.last_message(result::RAGResult)

Extract the last message from the RAGResult. It looks for final_answer first, then answer fields in the conversations dictionary. Returns nothing if not found.

source

Helpful accessor for AICall blocks. Returns the last message in the conversation.

source

Helpful accessor for the last message in conversation. Returns the last message in the conversation.

source


# PromptingTools.Experimental.AgentTools.airetry!Function.
julia
airetry!(
+You can also use `AICall` as a functor to trigger the AI call with a `UserMessage` or simply the text to send:

julia aicall(UserMessage("Hello, world!")) # Triggers the lazy call result = run!(aicall) # Explicitly runs the AI call ``` This can be used to "reply" to previous message / continue the stored conversation

Notes

  • The AICall struct is a key component in building flexible and efficient Agentic pipelines

  • The lazy evaluation model allows for setting up the call parameters in advance and deferring the actual execution until it is explicitly triggered.

  • This struct is particularly useful in scenarios where the timing of AI function execution needs to be deferred or where multiple potential calls need to be prepared and selectively executed.

source


# PromptingTools.last_outputFunction.

Extracts the last output (generated text answer) from the RAGResult.

source

Helpful accessor for AICall blocks. Returns the last output in the conversation (eg, the string/data in the last message).

source

Helpful accessor for the last generated output (msg.content) in conversation. Returns the last output in the conversation (eg, the string/data in the last message).

source


# PromptingTools.last_messageFunction.
julia
PT.last_message(result::RAGResult)

Extract the last message from the RAGResult. It looks for final_answer first, then answer fields in the conversations dictionary. Returns nothing if not found.

source

Helpful accessor for AICall blocks. Returns the last message in the conversation.

source

Helpful accessor for the last message in conversation. Returns the last message in the conversation.

source


# PromptingTools.Experimental.AgentTools.airetry!Function.
julia
airetry!(
     f_cond::Function, aicall::AICallBlock, feedback::Union{AbstractString, Function} = "";
     verbose::Bool = true, throw::Bool = false, evaluate_all::Bool = true, feedback_expensive::Bool = false,
     max_retries::Union{Nothing, Int} = nothing, retry_delay::Union{Nothing, Int} = nothing)

Evaluates the condition f_cond on the aicall object. If the condition is not met, it will return the best sample to retry from and provide feedback (string or function) to aicall. That's why it's mutating. It will retry maximum max_retries times, with throw=true, an error will be thrown if the condition is not met after max_retries retries.

Note: aicall must be run first via run!(aicall) before calling airetry!.

Function signatures

  • f_cond(aicall::AICallBlock) -> Bool, ie, it must accept the aicall object and return a boolean value.

  • feedback can be a string or feedback(aicall::AICallBlock) -> String, ie, it must accept the aicall object and return a string.

You can leverage the last_message, last_output, and AICode functions to access the last message, last output and execute code blocks in the conversation, respectively. See examples below.

Good Use Cases

  • Retry with API failures/drops (add retry_delay=2 to wait 2s between retries)

  • Check the output format / type / length / etc

  • Check the output with aiclassify call (LLM Judge) to catch unsafe/NSFW/out-of-scope content

  • Provide hints to the model to guide it to the correct answer

Gotchas

  • If controlling keyword arguments are set to nothing, they will fall back to the default values in aicall.config. You can override them by passing the keyword arguments explicitly.

  • If there are multiple airetry! checks, they are evaluated sequentially. As long as throw==false, they will all be evaluated even if they failed previous checks.

  • Only samples which passed previous evaluations are evaluated (sample.success is true). If there are no successful samples, the function will evaluate only the active sample (aicall.active_sample_id) and nothing else.

  • Feedback from all "ancestor" evaluations is added upon retry, not feedback from the "siblings" or other branches. To have only ONE long BRANCH (no siblings), make sure to keep RetryConfig(; n_samples=1). That way the model will always see ALL previous feedback.

  • We implement a version of Monte Carlo Tree Search (MCTS) to always pick the most promising sample to restart from (you can tweak the options in RetryConfig to change the behaviour).

  • For large number of parallel branches (ie, "shallow and wide trees"), you might benefit from switching scoring to scoring=ThompsonSampling() (similar to how Bandit algorithms work).

  • Open-source/local models can struggle with too long conversation, you might want to experiment with in-place feedback (set RetryConfig(; feedback_inplace=true)).

Arguments

  • f_cond::Function: A function that accepts the aicall object and returns a boolean value. Retry will be attempted if the condition is not met (f_cond -> false).

  • aicall::AICallBlock: The aicall object to evaluate the condition on.

  • feedback::Union{AbstractString, Function}: Feedback to provide if the condition is not met. If a function is provided, it must accept the aicall object as the only argument and return a string.

  • verbose::Integer=1: A verbosity level for logging the retry attempts and warnings. A higher value indicates more detailed logging.

  • throw::Bool=false: If true, it will throw an error if the function f_cond does not return true after max_retries retries.

  • evaluate_all::Bool=false: If true, it will evaluate all the "successful" samples in the aicall object. Otherwise, it will only evaluate the active sample.

  • feedback_expensive::Bool=false: If false, it will provide feedback to all samples that fail the condition. If the feedback function is expensive to call (eg, another ai* function), set this to true and feedback will be provided only to the sample we will retry from.

  • max_retries::Union{Nothing, Int}=nothing: Maximum number of retries. If not provided, it will fall back to the max_retries in aicall.config.

  • retry_delay::Union{Nothing, Int}=nothing: Delay between retries in seconds. If not provided, it will fall back to the retry_delay in aicall.config.

Returns

  • The aicall object with the updated conversation, and samples (saves the evaluations and their scores/feedback).

Example

You can use airetry! to catch API errors in run! and auto-retry the call. RetryConfig is how you influence all the subsequent retry behaviours - see ?RetryConfig for more details.

julia
# API failure because of a non-existent model
@@ -221,7 +221,7 @@
 ## ID: 32991, Guess: 50
 ## ID: 32991, Guess: 35
 ## ID: 32991, Guess: 33
-## etc...

Note that if there are multiple "branches", the model will see only its own feedback and that of its ancestors, not the other "branches". If you want to provide ALL feedback, set RetryConfig(; n_samples=1) to remove any "branching". The fixing will then be done sequentially in one conversation and the model will see all feedback (less powerful if the model falls into a bad state). Alternatively, you can tweak the feedback function.

See Also

References: airetry is inspired by the Language Agent Tree Search paper and by the DSPy Assertions paper.

source


# PromptingTools.Experimental.AgentTools.print_samplesFunction.

Pretty prints the samples tree starting from node. Usually, node is the root of the tree. Example: print_samples(aicall.samples).

source


# PromptingTools.AICodeType.
julia
AICode(code::AbstractString; auto_eval::Bool=true, safe_eval::Bool=false, 
+## etc...

Note that if there are multiple "branches", the model will see only its own feedback and that of its ancestors, not the other "branches". If you want to provide ALL feedback, set RetryConfig(; n_samples=1) to remove any "branching". The fixing will then be done sequentially in one conversation and the model will see all feedback (less powerful if the model falls into a bad state). Alternatively, you can tweak the feedback function.

See Also

References: airetry is inspired by the Language Agent Tree Search paper and by the DSPy Assertions paper.

source


# PromptingTools.Experimental.AgentTools.print_samplesFunction.

Pretty prints the samples tree starting from node. Usually, node is the root of the tree. Example: print_samples(aicall.samples).

source


# PromptingTools.AICodeType.
julia
AICode(code::AbstractString; auto_eval::Bool=true, safe_eval::Bool=false, 
 skip_unsafe::Bool=false, capture_stdout::Bool=true, verbose::Bool=false,
 prefix::AbstractString="", suffix::AbstractString="", remove_tests::Bool=false, execution_timeout::Int = 60)
 
@@ -246,15 +246,15 @@
 code.code |> clipboard
 
 # or execute it in the current module (=Main)
-eval(code.expression)

source


# PromptingTools.Experimental.AgentTools.aicodefixer_feedbackFunction.
julia
aicodefixer_feedback(cb::AICode; max_length::Int = 512) -> NamedTuple(; feedback::String)
+eval(code.expression)

source


# PromptingTools.Experimental.AgentTools.aicodefixer_feedbackFunction.
julia
aicodefixer_feedback(cb::AICode; max_length::Int = 512) -> NamedTuple(; feedback::String)
 aicodefixer_feedback(conversation::AbstractVector{<:PT.AbstractMessage}; max_length::Int = 512) -> NamedTuple(; feedback::String)
 aicodefixer_feedback(msg::PT.AIMessage; max_length::Int = 512) -> NamedTuple(; feedback::String)
 aicodefixer_feedback(aicall::AICall; max_length::Int = 512) -> NamedTuple(; feedback::String)

Generate feedback for an AI code-fixing session based on the AICode block and/or conversation history (that will be used to extract and evaluate a code block). The function is designed to be extensible for different types of feedback and code evaluation outcomes.

The high-level wrapper accepts a conversation and returns new kwargs for the AICall.

Individual feedback functions are dispatched on different subtypes of AbstractCodeOutcome and can be extended/overwritten to provide more detailed feedback.

See also: AIGenerate, AICodeFixer

Arguments

  • cb::AICode: AICode block to evaluate and provide feedback on.

  • max_length::Int=512: An optional argument that specifies the maximum length of the feedback message.

Returns

  • NamedTuple: A feedback message as a kwarg in NamedTuple based on the analysis of the code provided in the conversation.

Example

julia
cb = AICode(msg; skip_unsafe = true, capture_stdout = true)
 new_kwargs = aicodefixer_feedback(cb)
 
 new_kwargs = aicodefixer_feedback(msg)
-new_kwargs = aicodefixer_feedback(conversation)

Notes

This function is part of the AI code fixing system, intended to interact with code in AIMessage and provide feedback on improving it.

The high-level wrapper accepts a conversation and returns new kwargs for the AICall.

It dispatches for the code feedback based on the subtypes of AbstractCodeOutcome below:

  • CodeEmpty: No code found in the message.

  • CodeFailedParse: Code parsing error.

  • CodeFailedEval: Runtime evaluation error.

  • CodeFailedTimeout: Code execution timed out.

  • CodeSuccess: Successful code execution.

You can override the individual methods to customize the feedback.

source


# PromptingTools.Experimental.AgentTools.error_feedbackFunction.
julia
error_feedback(e::Any; max_length::Int = 512)

Set of specialized methods to provide feedback on different types of errors (e).

source


- +new_kwargs = aicodefixer_feedback(conversation)

Notes

This function is part of the AI code fixing system, intended to interact with code in AIMessage and provide feedback on improving it.

The high-level wrapper accepts a conversation and returns new kwargs for the AICall.

It dispatches for the code feedback based on the subtypes of AbstractCodeOutcome below:

You can override the individual methods to customize the feedback.

source


# PromptingTools.Experimental.AgentTools.error_feedbackFunction.
julia
error_feedback(e::Any; max_length::Int = 512)

Set of specialized methods to provide feedback on different types of errors (e).

source


+ \ No newline at end of file diff --git a/previews/PR218/extra_tools/api_tools_intro.html b/previews/PR218/extra_tools/api_tools_intro.html index ae7e93df2..8157bbbaa 100644 --- a/previews/PR218/extra_tools/api_tools_intro.html +++ b/previews/PR218/extra_tools/api_tools_intro.html @@ -8,19 +8,19 @@ - + - + - +

APITools Introduction

APITools is an experimental module wrapping helpful APIs for working with and enhancing GenerativeAI models.

Import the module as follows:

julia
using PromptingTools.Experimental.APITools

Highlights

Currently, there is only one function in this module, create_websearch, which leverages the Tavily.com search and answer engine to provide additional context.

You need to sign up for an API key at Tavily.com and set it as an environment variable TAVILY_API_KEY to use this function.
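
For example, a minimal sketch (the key value below is a placeholder for your own Tavily key):

julia
using PromptingTools.Experimental.APITools: create_websearch

ENV["TAVILY_API_KEY"] = "<your-tavily-api-key>"  # placeholder, not a real key
r = create_websearch("Latest release of the Julia programming language")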

References

# PromptingTools.Experimental.APITools.create_websearchFunction.
julia
create_websearch(query::AbstractString;
     api_key::AbstractString,
-    search_depth::AbstractString = "basic")

Arguments

  • query::AbstractString: The query to search for.

  • api_key::AbstractString: The API key to use for the search. Get an API key from Tavily.

  • search_depth::AbstractString: The depth of the search. Can be either "basic" or "advanced". Default is "basic". An "advanced" search counts as 2 API requests.

  • include_answer::Bool: Whether to include the answer in the search results. Default is false.

  • include_raw_content::Bool: Whether to include the raw content in the search results. Default is false.

  • max_results::Integer: The maximum number of results to return. Default is 5.

  • include_images::Bool: Whether to include images in the search results. Default is false.

  • include_domains::AbstractVector{<:AbstractString}: A list of domains to include in the search results. Default is an empty list.

  • exclude_domains::AbstractVector{<:AbstractString}: A list of domains to exclude from the search results. Default is an empty list.

Example

julia
r = create_websearch("Who is King Charles?")

Even better, you can get not just the results but also the answer:

julia
r = create_websearch("Who is King Charles?"; include_answer = true)

See Rest API documentation for more information.

source


- + search_depth::AbstractString = "basic")

Arguments

Example

julia
r = create_websearch("Who is King Charles?")

Even better, you can get not just the results but also the answer:

julia
r = create_websearch("Who is King Charles?"; include_answer = true)

See Rest API documentation for more information.

source


+ \ No newline at end of file diff --git a/previews/PR218/extra_tools/rag_tools_intro.html b/previews/PR218/extra_tools/rag_tools_intro.html index c56497519..037b871b9 100644 --- a/previews/PR218/extra_tools/rag_tools_intro.html +++ b/previews/PR218/extra_tools/rag_tools_intro.html @@ -8,11 +8,11 @@ - + - + - + @@ -137,7 +137,7 @@ # Assuming `test_files` is a vector of file paths indexer = SimpleIndexer(chunker=FileChunker(), tagger=OpenTagger()) index = build_index(indexer, test_files; - chunker_kwargs(; separators=[". "]), verbose=true)

Notes

source

julia
build_index(
+        chunker_kwargs = (; separators=[". "]), verbose=true)

Notes

source

julia
build_index(
     indexer::KeywordsIndexer, files_or_docs::Vector{<:AbstractString};
     verbose::Integer = 1,
     extras::Union{Nothing, AbstractVector} = nothing,
@@ -149,7 +149,7 @@
     tagger::AbstractTagger = indexer.tagger,
     tagger_kwargs::NamedTuple = NamedTuple(),
     api_kwargs::NamedTuple = NamedTuple(),
-    cost_tracker = Threads.Atomic{Float64}(0.0))

Builds a ChunkKeywordsIndex from the provided files or documents to support keyword-based search (BM25).

source


# PromptingTools.Experimental.RAGTools.airagFunction.
julia
airag(cfg::AbstractRAGConfig, index::AbstractDocumentIndex;
+    cost_tracker = Threads.Atomic{Float64}(0.0))

Builds a ChunkKeywordsIndex from the provided files or documents to support keyword-based search (BM25).
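
As a rough sketch (the file paths are hypothetical and the default KeywordsIndexer settings are assumed):

julia
using PromptingTools.Experimental.RAGTools

# Build a BM25 keyword index over a couple of local text files
index = build_index(KeywordsIndexer(), ["docs/page1.txt", "docs/page2.txt"])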

source


# PromptingTools.Experimental.RAGTools.airagFunction.
julia
airag(cfg::AbstractRAGConfig, index::AbstractDocumentIndex;
     question::AbstractString,
     verbose::Integer = 1, return_all::Bool = false,
     api_kwargs::NamedTuple = NamedTuple(),
@@ -196,7 +196,7 @@
 result = airag(cfg, multi_index; question, return_all=true)
 
 # Pretty-print the result
-PT.pprint(result)

For easier manipulation of nested kwargs, see utilities getpropertynested, setpropertynested, merge_kwargs_nested.

source


# PromptingTools.Experimental.RAGTools.retrieveFunction.
julia
retrieve(retriever::AbstractRetriever,
+PT.pprint(result)

For easier manipulation of nested kwargs, see utilities getpropertynested, setpropertynested, merge_kwargs_nested.

source


# PromptingTools.Experimental.RAGTools.retrieveFunction.
julia
retrieve(retriever::AbstractRetriever,
     index::AbstractChunkIndex,
     question::AbstractString;
     verbose::Integer = 1,
@@ -237,7 +237,7 @@
     rephraser_kwargs = (; model = "custom"),
     embedder_kwargs = (; model = "custom"),
     tagger_kwargs = (; model = "custom"), api_kwargs = (;
-        url = "http://localhost:8080"))

source


# PromptingTools.Experimental.RAGTools.generate!Function.
julia
generate!(
+        url = "http://localhost:8080"))

source


# PromptingTools.Experimental.RAGTools.generate!Function.
julia
generate!(
     generator::AbstractGenerator, index::AbstractDocumentIndex, result::AbstractRAGResult;
     verbose::Integer = 1,
     api_kwargs::NamedTuple = NamedTuple(),
@@ -258,7 +258,7 @@
 result = retrieve(index, question)
 
 # Generate the answer using the default generator, mutates the same result
-result = generate!(index, result)

source


# PromptingTools.Experimental.RAGTools.annotate_supportFunction.
julia
annotate_support(annotater::TrigramAnnotater, answer::AbstractString,
+result = generate!(index, result)

source


# PromptingTools.Experimental.RAGTools.annotate_supportFunction.
julia
annotate_support(annotater::TrigramAnnotater, answer::AbstractString,
     context::AbstractVector; min_score::Float64 = 0.5,
     skip_trigrams::Bool = true, hashed::Bool = true,
     sources::Union{Nothing, AbstractVector{<:AbstractString}} = nothing,
@@ -270,7 +270,7 @@
 answer = "This is a test context. Another context sentence."
 
 annotated_root = annotate_support(annotater, answer, context)
-pprint(annotated_root) # pretty print the annotated tree

source

julia
annotate_support(
+pprint(annotated_root) # pretty print the annotated tree

source

julia
annotate_support(
     annotater::TrigramAnnotater, result::AbstractRAGResult; min_score::Float64 = 0.5,
     skip_trigrams::Bool = true, hashed::Bool = true,
     min_source_score::Float64 = 0.25,
@@ -278,12 +278,12 @@
     add_scores::Bool = true, kwargs...)

Dispatch for annotate_support for AbstractRAGResult type. It extracts the final_answer and context from the result and calls annotate_support with them.

See annotate_support for more details.

Example

julia
res = RAGResult(; question = "", final_answer = "This is a test.",
     context = ["Test context.", "Completely different"])
 annotated_root = annotate_support(annotater, res)
-PT.pprint(annotated_root)

source


# PromptingTools.Experimental.RAGTools.build_qa_evalsFunction.
julia
build_qa_evals(doc_chunks::Vector{<:AbstractString}, sources::Vector{<:AbstractString};
+PT.pprint(annotated_root)

source


# PromptingTools.Experimental.RAGTools.build_qa_evalsFunction.
julia
build_qa_evals(doc_chunks::Vector{<:AbstractString}, sources::Vector{<:AbstractString};
                model=PT.MODEL_CHAT, instructions="None.", qa_template::Symbol=:RAGCreateQAFromContext, 
                verbose::Bool=true, api_kwargs::NamedTuple = NamedTuple(), kwargs...) -> Vector{QAEvalItem}

Create a collection of question and answer evaluations (QAEvalItem) from document chunks and sources. This function generates Q&A pairs based on the provided document chunks, using a specified AI model and template.

Arguments

Returns

Vector{QAEvalItem}: A vector of QAEvalItem structs, each containing a source, context, question, and answer. Invalid or empty items are filtered out.

Notes

Examples

Creating Q&A evaluations from a set of document chunks:

julia
doc_chunks = ["Text from document 1", "Text from document 2"]
 sources = ["source1", "source2"]
-qa_evals = build_qa_evals(doc_chunks, sources)

source


- +qa_evals = build_qa_evals(doc_chunks, sources)

source


+ \ No newline at end of file diff --git a/previews/PR218/extra_tools/text_utilities_intro.html b/previews/PR218/extra_tools/text_utilities_intro.html index dca63429d..ffa9d4203 100644 --- a/previews/PR218/extra_tools/text_utilities_intro.html +++ b/previews/PR218/extra_tools/text_utilities_intro.html @@ -8,11 +8,11 @@ - + - + - + @@ -21,18 +21,18 @@ chunks = recursive_splitter(text; max_length=13) length(chunks) # Output: 2

Using a custom separator and custom max_length

julia
text = "Hello,World," ^ 2900 # length 34900 chars
 chunks = recursive_splitter(text; separator=",", max_length=10000) # for 4K context window
-length(chunks) # Output: 4

source

julia
recursive_splitter(text::AbstractString, separators::Vector{String}; max_length::Int=35000) -> Vector{String}

Split a given string text into chunks recursively using a series of separators, with each chunk having a maximum length of max_length (if it's achievable given the separators provided). This function is useful for splitting large documents or texts into smaller segments that are more manageable for processing, particularly for models or systems with limited context windows.

It was previously known as split_by_length.

This is similar to Langchain's RecursiveCharacterTextSplitter. To achieve the same behavior, use separators=["\n\n", "\n", " ", ""].

Arguments

Returns

Vector{String}: A vector of strings, where each string is a chunk of the original text that is smaller than or equal to max_length.

Usage Tips

How It Works

Examples

Splitting text using multiple separators:

julia
text = "Paragraph 1\n\nParagraph 2. Sentence 1. Sentence 2.\nParagraph 3"
+length(chunks) # Output: 4

source

julia
recursive_splitter(text::AbstractString, separators::Vector{String}; max_length::Int=35000) -> Vector{String}

Split a given string text into chunks recursively using a series of separators, with each chunk having a maximum length of max_length (if it's achievable given the separators provided). This function is useful for splitting large documents or texts into smaller segments that are more manageable for processing, particularly for models or systems with limited context windows.

It was previously known as split_by_length.

This is similar to Langchain's RecursiveCharacterTextSplitter. To achieve the same behavior, use separators=["\n\n", "\n", " ", ""].

Arguments

Returns

Vector{String}: A vector of strings, where each string is a chunk of the original text that is smaller than or equal to max_length.

Usage Tips

How It Works

Examples

Splitting text using multiple separators:

julia
text = "Paragraph 1\n\nParagraph 2. Sentence 1. Sentence 2.\nParagraph 3"
 separators = ["\n\n", ". ", "\n"] # split by paragraphs, sentences, and newlines (not by words)
 chunks = recursive_splitter(text, separators, max_length=20)

Splitting text using multiple separators - with splitting on words:

julia
text = "Paragraph 1\n\nParagraph 2. Sentence 1. Sentence 2.\nParagraph 3"
 separators = ["\n\n", ". ", "\n", " "] # split by paragraphs, sentences, and newlines, words
 chunks = recursive_splitter(text, separators, max_length=10)

Using a single separator:

julia
text = "Hello,World," ^ 2900  # length 34900 characters
 chunks = recursive_splitter(text, [","], max_length=10000)

To achieve the same behavior as Langchain's RecursiveCharacterTextSplitter, use separators=["\n\n", "\n", " ", ""].

julia
text = "Paragraph 1\n\nParagraph 2. Sentence 1. Sentence 2.\nParagraph 3"
 separators = ["\n\n", "\n", " ", ""]
-chunks = recursive_splitter(text, separators, max_length=10)

source


# PromptingTools.replace_wordsFunction.
julia
replace_words(text::AbstractString, words::Vector{<:AbstractString}; replacement::AbstractString="ABC")

Replace all occurrences of words in words with replacement in text. Useful to quickly remove specific names or entities from a text.

Arguments

Example

julia
text = "Disney is a great company"
+chunks = recursive_splitter(text, separators, max_length=10)

source


# PromptingTools.replace_wordsFunction.
julia
replace_words(text::AbstractString, words::Vector{<:AbstractString}; replacement::AbstractString="ABC")

Replace all occurrences of words in words with replacement in text. Useful to quickly remove specific names or entities from a text.

Arguments

Example

julia
text = "Disney is a great company"
 replace_words(text, ["Disney", "Snow White", "Mickey Mouse"])
-# Output: "ABC is a great company"

source


# PromptingTools.wrap_stringFunction.
julia
wrap_string(str::String,
+# Output: "ABC is a great company"

source


# PromptingTools.wrap_stringFunction.
julia
wrap_string(str::String,
     text_width::Int = 20;
-    newline::Union{AbstractString, AbstractChar} = '\n')

Breaks a string into lines of a given text_width. Optionally, you can specify the newline character or string to use.

Example:

julia
wrap_string("Certainly, here's a function in Julia that will wrap a string according to the specifications:", 10) |> print

source


# PromptingTools.length_longest_common_subsequenceFunction.
julia
length_longest_common_subsequence(itr1::AbstractString, itr2::AbstractString)

Compute the length of the longest common subsequence between two string sequences (ie, the higher the number, the better the match).

Source: https://cn.julialang.org/LeetCode.jl/dev/democards/problems/problems/1143.longest-common-subsequence/

Arguments

Returns

The length of the longest common subsequence.

Examples

julia
text1 = "abc-abc----"
+    newline::Union{AbstractString, AbstractChar} = '\n')

Breaks a string into lines of a given text_width. Optionally, you can specify the newline character or string to use.

Example:

julia
wrap_string("Certainly, here's a function in Julia that will wrap a string according to the specifications:", 10) |> print

source


# PromptingTools.length_longest_common_subsequenceFunction.
julia
length_longest_common_subsequence(itr1::AbstractString, itr2::AbstractString)

Compute the length of the longest common subsequence between two string sequences (ie, the higher the number, the better the match).

Source: https://cn.julialang.org/LeetCode.jl/dev/democards/problems/problems/1143.longest-common-subsequence/

Arguments

Returns

The length of the longest common subsequence.

Examples

julia
text1 = "abc-abc----"
 text2 = "___ab_c__abc"
 longest_common_subsequence(text1, text2)
 # Output: 6 (-> "abcabc")

It can be used to fuzzy match strings and find the similarity between them (Tip: normalize the match)

julia
commands = ["product recommendation", "emotions", "specific product advice", "checkout advice"]
@@ -43,7 +43,7 @@
     @info "The closest command to the query: "$(query)" is: "$(commands[pos])" (distance: $(dist), normalized: $(norm))"
 end

But it might be easier to directly use the convenience wrapper distance_longest_common_subsequence!


 
-[source](https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/utils.jl#L252-L288)
+[source](https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/utils.jl#L252-L288)
 
 </div>
 <br>
@@ -71,8 +71,8 @@
     """
 
 dist = distance_longest_common_subsequence(story, context)
-@info "The closest context to the query: "$(first(story,20))..." is: "$(context[argmin(dist)])" (distance: $(minimum(dist)))"

source


- +@info "The closest context to the query: "$(first(story,20))..." is: "$(context[argmin(dist)])" (distance: $(minimum(dist)))"

source


+ \ No newline at end of file diff --git a/previews/PR218/frequently_asked_questions.html b/previews/PR218/frequently_asked_questions.html index f73ca2751..9045ad048 100644 --- a/previews/PR218/frequently_asked_questions.html +++ b/previews/PR218/frequently_asked_questions.html @@ -8,9 +8,9 @@ - + - + @@ -130,7 +130,7 @@ wrap_schema = OpenAISchema() |> TracerSchema |> SaverSchema conv = aigenerate(wrap_schema,:BlankSystemUser; system="You're a French-speaking assistant!", user="Say hi!"; model="gpt-4", api_kwargs=(;temperature=0.1), return_all=true)

conv is a vector of tracing messages that will be saved to a JSON together with metadata about the template and api_kwargs.

If you would like to enable this behavior automatically, you can register your favorite model (or re-register existing models) with the "wrapped" schema:

julia
PT.register_model!(; name= "gpt-3.5-turbo", schema=OpenAISchema() |> TracerSchema |> SaverSchema)
- + \ No newline at end of file diff --git a/previews/PR218/getting_started.html b/previews/PR218/getting_started.html index 8ef9e2c86..7f9925e36 100644 --- a/previews/PR218/getting_started.html +++ b/previews/PR218/getting_started.html @@ -8,9 +8,9 @@ - + - + @@ -23,7 +23,7 @@ ai"What is the capital of \$(country)?"
plaintext
[ Info: Tokens: 32 @ Cost: $0.0001 in 0.5 seconds
 AIMessage("The capital of Spain is Madrid.")

Pro tip: Use after-string-flags to select the model to be called, eg, ai"What is the capital of France?"gpt4 (use gpt4t for the new GPT-4 Turbo model). Great for those extra hard questions!

Using aigenerate with placeholders

For more complex prompt templates, you can use handlebars-style templating and provide variables as keyword arguments:

julia
msg = aigenerate("What is the capital of {{country}}? Is the population larger than {{population}}?", country="Spain", population="1M")
plaintext
[ Info: Tokens: 74 @ Cost: $0.0001 in 1.3 seconds
 AIMessage("The capital of Spain is Madrid. And yes, the population of Madrid is larger than 1 million. As of 2020, the estimated population of Madrid is around 3.3 million people.")

Pro tip: Use asyncmap to run multiple AI-powered tasks concurrently.
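
For instance, a small sketch of that pattern (the country names are just an illustration):

julia
using PromptingTools

countries = ["France", "Spain", "Japan"]
msgs = asyncmap(countries) do country
    aigenerate("What is the capital of {{country}}?"; country)
end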

Pro tip: If you use slow models (like GPT-4), you can use the asynchronous version of @ai_str -> @aai_str to avoid blocking the REPL, eg, aai"Say hi but slowly!"gpt4 (similarly @ai!_str -> @aai!_str for multi-turn conversations).

For more practical examples, see the Various Examples section.

- + \ No newline at end of file diff --git a/previews/PR218/hashmap.json b/previews/PR218/hashmap.json index f99481007..104b47f00 100644 --- a/previews/PR218/hashmap.json +++ b/previews/PR218/hashmap.json @@ -1 +1 @@ -{"coverage_of_model_providers.md":"DbJJ6fa9","examples_building_rag.md":"ubiEVhnz","examples_readme_examples.md":"C3bAj1O-","examples_working_with_aitemplates.md":"B32dEK-K","examples_working_with_custom_apis.md":"DdeAps4a","examples_working_with_google_ai_studio.md":"DMQGNet4","examples_working_with_ollama.md":"dzClgWPP","extra_tools_agent_tools_intro.md":"cSVCxyyT","extra_tools_api_tools_intro.md":"Dbv02Yyd","extra_tools_rag_tools_intro.md":"CJjL2hwq","extra_tools_text_utilities_intro.md":"B_lqAVJR","frequently_asked_questions.md":"BuxY2KpU","getting_started.md":"Ch3ZoUfB","how_it_works.md":"C5N48RQE","index.md":"DG6QJRYA","prompts_agents.md":"CDfkF1zZ","prompts_classification.md":"C0D5VEQ-","prompts_critic.md":"DlYODwXP","prompts_extraction.md":"CZZSvmqA","prompts_general.md":"Lurz96ex","prompts_persona-task.md":"Cli7EsGS","prompts_rag.md":"BQVbDzLT","prompts_visual.md":"BRjUIOC-","reference.md":"DMO0NW9D","reference_agenttools.md":"D536xaTJ","reference_apitools.md":"DVDc6Kr1","reference_experimental.md":"BNmmZjbx","reference_ragtools.md":"gYCqDEbn"} +{"coverage_of_model_providers.md":"DbJJ6fa9","examples_building_rag.md":"ubiEVhnz","examples_readme_examples.md":"C3bAj1O-","examples_working_with_aitemplates.md":"B32dEK-K","examples_working_with_custom_apis.md":"DdeAps4a","examples_working_with_google_ai_studio.md":"DMQGNet4","examples_working_with_ollama.md":"dzClgWPP","extra_tools_agent_tools_intro.md":"C6nMFv4B","extra_tools_api_tools_intro.md":"DRt6snGq","extra_tools_rag_tools_intro.md":"oi28ZdI4","extra_tools_text_utilities_intro.md":"Cls15k4M","frequently_asked_questions.md":"BuxY2KpU","getting_started.md":"Ch3ZoUfB","how_it_works.md":"C5N48RQE","index.md":"DG6QJRYA","prompts_agents.md":"CDfkF1zZ","prompts_classification.md":"C0D5VEQ-","prompts_critic.md":"DlYODwXP","prompts_extraction.md":"CZZSvmqA","prompts_general.md":"Lurz96ex","prompts_persona-task.md":"Cli7EsGS","prompts_rag.md":"BQVbDzLT","prompts_visual.md":"BRjUIOC-","reference.md":"Bl4MWKuL","reference_agenttools.md":"CE_B_eQV","reference_apitools.md":"BVdQH4AZ","reference_experimental.md":"DW1f4gT-","reference_ragtools.md":"Bby7eP61"} diff --git a/previews/PR218/how_it_works.html b/previews/PR218/how_it_works.html index 273b15d9a..9ac51e63b 100644 --- a/previews/PR218/how_it_works.html +++ b/previews/PR218/how_it_works.html @@ -8,9 +8,9 @@ - + - + @@ -113,7 +113,7 @@ food = JSON3.read(last_output(result), Food) ## [ Info: Condition not met. Retrying... ## Output: Food("apple", ["delicious", "juicy"])

It took 1 retry (see result.config.retries) and we have the correct output from an open-source model!

If you're interested in the result object, it's a struct (AICall) with a field conversation, which holds the conversation up to this point. AIGenerate is an alias for AICall using aigenerate function. See ?AICall (the underlying struct type) for more details on the fields and methods available.
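
For example, continuing from the run above, you can inspect it with the accessors mentioned earlier (a small sketch):

julia
result.conversation[end]  # the last message in the conversation
last_output(result)       # convenience accessor for the final output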

- + \ No newline at end of file diff --git a/previews/PR218/index.html b/previews/PR218/index.html index ab3827bb1..88b48489f 100644 --- a/previews/PR218/index.html +++ b/previews/PR218/index.html @@ -8,9 +8,9 @@ - + - + @@ -22,7 +22,7 @@ # Requires OPENAI_API_KEY environment variable! ai"What is the meaning of life?"

For more information, see the Getting Started section.


Ready to simplify your GenerativeAI tasks? Dive into PromptingTools.jl now and unlock your productivity.

Building a More Advanced Workflow?

PromptingTools offers many advanced features:

and more!

- + \ No newline at end of file diff --git a/previews/PR218/prompts/RAG.html b/previews/PR218/prompts/RAG.html index aac91f493..a952b541d 100644 --- a/previews/PR218/prompts/RAG.html +++ b/previews/PR218/prompts/RAG.html @@ -8,9 +8,9 @@ - + - + @@ -252,7 +252,7 @@ In this process, you strip out information that is not relevant for the retrieval task.

User Prompt:

plaintext
Here is the user query: {{query}}
 
 Rephrased query:
- + \ No newline at end of file diff --git a/previews/PR218/prompts/agents.html b/previews/PR218/prompts/agents.html index eee4f139a..ff2c539a0 100644 --- a/previews/PR218/prompts/agents.html +++ b/previews/PR218/prompts/agents.html @@ -8,9 +8,9 @@ - + - + @@ -90,7 +90,7 @@ Take a deep break. Think step-by-step and fix the above errors. I believe in you. You can do it! I also need code, actual working Julia code, no shortcuts.

Feedback Templates

Template: FeedbackFromEvaluator

System Prompt:

plaintext

User Prompt:

plaintext
### Feedback from Evaluator
 {{feedback}}
- + \ No newline at end of file diff --git a/previews/PR218/prompts/classification.html b/previews/PR218/prompts/classification.html index d45b5a081..01b97dabc 100644 --- a/previews/PR218/prompts/classification.html +++ b/previews/PR218/prompts/classification.html @@ -8,9 +8,9 @@ - + - + @@ -47,7 +47,7 @@ - If none of the endpoint categories are appropriate for the given input, select the choice indicating that no category fits.

User Prompt:

plaintext
User Question: {{question}}
 
 Endpoint Choice:
- + \ No newline at end of file diff --git a/previews/PR218/prompts/critic.html b/previews/PR218/prompts/critic.html index 48e2096be..7d1d19274 100644 --- a/previews/PR218/prompts/critic.html +++ b/previews/PR218/prompts/critic.html @@ -8,9 +8,9 @@ - + - + @@ -103,7 +103,7 @@ Remember to follow the three-step workflow: Reflection, Suggestions, Outcome. Julia Expert says: - + \ No newline at end of file diff --git a/previews/PR218/prompts/extraction.html b/previews/PR218/prompts/extraction.html index 54665d53e..373430f64 100644 --- a/previews/PR218/prompts/extraction.html +++ b/previews/PR218/prompts/extraction.html @@ -8,9 +8,9 @@ - + - + @@ -24,7 +24,7 @@ </data>

Extraction Templates

Template: ExtractData

System Prompt:

plaintext
You are a world-class expert for function-calling and data extraction. Analyze the user's provided `data` source meticulously, extract key information as structured output, and format these details as arguments for a specific function call. Ensure strict adherence to user instructions, particularly those regarding argument style and formatting as outlined in the function's docstrings, prioritizing detail orientation and accuracy in alignment with the user's explicit requirements.

User Prompt:

plaintext
# Data
 
 {{data}}
- + \ No newline at end of file diff --git a/previews/PR218/prompts/general.html b/previews/PR218/prompts/general.html index a7e103787..ab560b467 100644 --- a/previews/PR218/prompts/general.html +++ b/previews/PR218/prompts/general.html @@ -8,9 +8,9 @@ - + - + @@ -20,7 +20,7 @@

The following file is auto-generated from the templates folder. For any changes, please modify the source files in the templates folder.

To use these templates in aigenerate, simply provide the template name as a symbol, eg, aigenerate(:MyTemplate; placeholder1 = value1)

General Templates

Template: BlankSystemUser

  • Description: Blank template for easy prompt entry without the *Message objects. Simply provide keyword arguments for system (=system prompt/persona) and user (=user/task/data prompt). Placeholders: system, user

  • Placeholders: system, user

  • Word count: 18

  • Source:

  • Version: 1.1

System Prompt:

plaintext
{{system}}

User Prompt:

plaintext
{{user}}

Template: PromptEngineerForTask

  • Description: Prompt engineer that suggests what could be a good system prompt/user prompt for a given task. Placeholder: task

  • Placeholders: task

  • Word count: 402

  • Source:

  • Version: 1

System Prompt:

plaintext
You are a world-class prompt engineering assistant. Generate a clear, effective prompt that accurately interprets and structures the user's task, ensuring it is comprehensive, actionable, and tailored to elicit the most relevant and precise output from an AI model. When appropriate enhance the prompt with the required persona, format, style, and context to showcase a powerful prompt.

User Prompt:

plaintext
# Task
 
 {{task}}
- + \ No newline at end of file diff --git a/previews/PR218/prompts/persona-task.html b/previews/PR218/prompts/persona-task.html index 256715062..b189aa640 100644 --- a/previews/PR218/prompts/persona-task.html +++ b/previews/PR218/prompts/persona-task.html @@ -8,9 +8,9 @@ - + - + @@ -461,7 +461,7 @@ <special_instructions> {{instructions}} </special_instructions> - + \ No newline at end of file diff --git a/previews/PR218/prompts/visual.html b/previews/PR218/prompts/visual.html index fb931e8dd..dac40609b 100644 --- a/previews/PR218/prompts/visual.html +++ b/previews/PR218/prompts/visual.html @@ -8,9 +8,9 @@ - + - + @@ -31,7 +31,7 @@ Please generate the image.

Template: OCRTask

System Prompt:

plaintext
You are a world-class OCR engine. Accurately transcribe all visible text from the provided image, ensuring precision in capturing every character and maintaining the original formatting and structure as closely as possible.

User Prompt:

plaintext
# Task
 
 {{task}}
- + \ No newline at end of file diff --git a/previews/PR218/reference.html b/previews/PR218/reference.html index 3cb80ab28..15e4cc0cb 100644 --- a/previews/PR218/reference.html +++ b/previews/PR218/reference.html @@ -8,21 +8,21 @@ - + - + - + -

Reference

# PromptingTools.ALLOWED_PREFERENCESConstant.

Keys that are allowed to be set via set_preferences!

source


# PromptingTools.ALTERNATIVE_GENERATION_COSTSConstant.
julia
ALTERNATIVE_GENERATION_COSTS

Tracker of alternative costing models, eg, for image generation (dall-e-3), the cost is driven by quality/size.

source


# PromptingTools.ANTHROPIC_TOOL_PROMPTConstant.

Simple template to add to the System Message when doing data extraction with Anthropic models.

It has 3 placeholders: tool_name, tool_description and tool_parameters that are filled with the tool's name, description and parameters. Source: https://docs.anthropic.com/claude/docs/functions-external-tools

source


# PromptingTools.CONV_HISTORYConstant.
julia
CONV_HISTORY

Tracks the most recent conversations through the ai_str macros.

Preference available: MAX_HISTORY_LENGTH, which sets how many last messages should be remembered.

See also: push_conversation!, resize_conversation!

source


# PromptingTools.MODEL_ALIASESConstant.
julia
MODEL_ALIASES

A dictionary of model aliases. Aliases let you refer to models by a short name instead of their full name, which makes them more convenient to use.

Accessing the aliases

PromptingTools.MODEL_ALIASES["gpt3"]

Register a new model alias

julia
PromptingTools.MODEL_ALIASES["gpt3"] = "gpt-3.5-turbo"

source


# PromptingTools.MODEL_REGISTRYConstant.
julia
MODEL_REGISTRY

A store of available model names and their specs (ie, name, costs per token, etc.)

Accessing the registry

You can use both the alias name or the full name to access the model spec:

PromptingTools.MODEL_REGISTRY["gpt-3.5-turbo"]

Registering a new model

julia
register_model!(
+    

Reference

# PromptingTools.ALLOWED_PREFERENCESConstant.

Keys that are allowed to be set via set_preferences!

source


# PromptingTools.ALTERNATIVE_GENERATION_COSTSConstant.
julia
ALTERNATIVE_GENERATION_COSTS

Tracker of alternative costing models, eg, for image generation (dall-e-3), the cost is driven by quality/size.

source


# PromptingTools.ANTHROPIC_TOOL_PROMPTConstant.

Simple template to add to the System Message when doing data extraction with Anthropic models.

It has 3 placeholders: tool_name, tool_description and tool_parameters that are filled with the tool's name, description and parameters. Source: https://docs.anthropic.com/claude/docs/functions-external-tools

source


# PromptingTools.CONV_HISTORYConstant.
julia
CONV_HISTORY

Tracks the most recent conversations through the ai_str macros.

Preference available: MAX_HISTORY_LENGTH, which sets how many last messages should be remembered.

See also: push_conversation!, resize_conversation!

source


# PromptingTools.MODEL_ALIASESConstant.
julia
MODEL_ALIASES

A dictionary of model aliases. Aliases let you refer to models by a short name instead of their full name, which makes them more convenient to use.

Accessing the aliases

PromptingTools.MODEL_ALIASES["gpt3"]

Register a new model alias

julia
PromptingTools.MODEL_ALIASES["gpt3"] = "gpt-3.5-turbo"

source


# PromptingTools.MODEL_REGISTRYConstant.
julia
MODEL_REGISTRY

A store of available model names and their specs (ie, name, costs per token, etc.)

Accessing the registry

You can use both the alias name or the full name to access the model spec:

PromptingTools.MODEL_REGISTRY["gpt-3.5-turbo"]

Registering a new model

julia
register_model!(
     name = "gpt-3.5-turbo",
     schema = :OpenAISchema,
     cost_of_token_prompt = 0.0015,
     cost_of_token_generation = 0.002,
-    description = "GPT-3.5 Turbo is a 175B parameter model and a common default on the OpenAI API.")

Registering a model alias

julia
PromptingTools.MODEL_ALIASES["gpt3"] = "gpt-3.5-turbo"

source


# PromptingTools.OPENAI_TOKEN_IDS_GPT35_GPT4Constant.

Token IDs for GPT3.5 and GPT4 from https://platform.openai.com/tokenizer

source


# PromptingTools.PREFERENCESConstant.
julia
PREFERENCES

You can set preferences for PromptingTools by setting environment variables or by using set_preferences!. It will create a LocalPreferences.toml file in your current directory and will reload your preferences from there.

Check your preferences by calling get_preferences(key::String).
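
For example, a minimal sketch (the model name is just an illustration):

julia
using PromptingTools

# Persist a preference into LocalPreferences.toml and read it back
PromptingTools.set_preferences!("MODEL_CHAT" => "gpt-4o-mini")
PromptingTools.get_preferences("MODEL_CHAT")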

Available Preferences (for set_preferences!)

  • OPENAI_API_KEY: The API key for the OpenAI API. See OpenAI's documentation for more information.

  • AZURE_OPENAI_API_KEY: The API key for the Azure OpenAI API. See Azure OpenAI's documentation for more information.

  • AZURE_OPENAI_HOST: The host for the Azure OpenAI API. See Azure OpenAI's documentation for more information.

  • MISTRALAI_API_KEY: The API key for the Mistral AI API. See Mistral AI's documentation for more information.

  • COHERE_API_KEY: The API key for the Cohere API. See Cohere's documentation for more information.

  • DATABRICKS_API_KEY: The API key for the Databricks Foundation Model API. See Databricks' documentation for more information.

  • DATABRICKS_HOST: The host for the Databricks API. See Databricks' documentation for more information.

  • TAVILY_API_KEY: The API key for the Tavily Search API. Register here. See more information here.

  • GOOGLE_API_KEY: The API key for Google Gemini models. Get yours from here. If you see a documentation page ("Available languages and regions for Google AI Studio and Gemini API"), it means that it's not yet available in your region.

  • ANTHROPIC_API_KEY: The API key for the Anthropic API. Get yours from here.

  • VOYAGE_API_KEY: The API key for the Voyage API. Free tier is up to 50M tokens! Get yours from here.

  • GROQ_API_KEY: The API key for the Groq API. Free in beta! Get yours from here.

  • DEEPSEEK_API_KEY: The API key for the DeepSeek API. Get 5 credit when you join. Get yours from here.

  • OPENROUTER_API_KEY: The API key for the OpenRouter API. Get yours from here.

  • CEREBRAS_API_KEY: The API key for the Cerebras API. Get yours from here.

  • MODEL_CHAT: The default model to use for aigenerate and most ai* calls. See MODEL_REGISTRY for a list of available models or define your own.

  • MODEL_EMBEDDING: The default model to use for aiembed (embedding documents). See MODEL_REGISTRY for a list of available models or define your own.

  • PROMPT_SCHEMA: The default prompt schema to use for aigenerate and most ai* calls (if not specified in MODEL_REGISTRY). Set as a string, eg, "OpenAISchema". See PROMPT_SCHEMA for more information.

  • MODEL_ALIASES: A dictionary of model aliases (alias => full_model_name). Aliases are used to refer to models by their aliases instead of their full names to make it more convenient to use them. See MODEL_ALIASES for more information.

  • MAX_HISTORY_LENGTH: The maximum length of the conversation history. Defaults to 5. Set to nothing to disable history. See CONV_HISTORY for more information.

  • LOCAL_SERVER: The URL of the local server to use for ai* calls. Defaults to http://localhost:10897/v1. This server is called when you call model="local" See ?LocalServerOpenAISchema for more information and examples.

  • LOG_DIR: The directory to save the logs to, eg, when using SaverSchema <: AbstractTracerSchema. Defaults to joinpath(pwd(), "log"). Refer to ?SaverSchema for more information on how it works and examples.

At the moment it is not possible to persist changes to MODEL_REGISTRY across sessions. Define your register_model!() calls in your startup.jl file to make them available across sessions or put them at the top of your script.

Available ENV Variables

  • OPENAI_API_KEY: The API key for the OpenAI API.

  • AZURE_OPENAI_API_KEY: The API key for the Azure OpenAI API.

  • AZURE_OPENAI_HOST: The host for the Azure OpenAI API. This is the URL built as https://<resource-name>.openai.azure.com.

  • MISTRALAI_API_KEY: The API key for the Mistral AI API.

  • COHERE_API_KEY: The API key for the Cohere API.

  • LOCAL_SERVER: The URL of the local server to use for ai* calls. Defaults to http://localhost:10897/v1. This server is called when you call model="local"

  • DATABRICKS_API_KEY: The API key for the Databricks Foundation Model API.

  • DATABRICKS_HOST: The host for the Databricks API.

  • TAVILY_API_KEY: The API key for the Tavily Search API. Register here. See more information here.

  • GOOGLE_API_KEY: The API key for Google Gemini models. Get yours from here. If you see a documentation page ("Available languages and regions for Google AI Studio and Gemini API"), it means that it's not yet available in your region.

  • ANTHROPIC_API_KEY: The API key for the Anthropic API. Get yours from here.

  • VOYAGE_API_KEY: The API key for the Voyage API. Free tier is up to 50M tokens! Get yours from here.

  • GROQ_API_KEY: The API key for the Groq API. Free in beta! Get yours from here.

  • DEEPSEEK_API_KEY: The API key for the DeepSeek API. Get 5 credit when you join. Get yours from here.

  • OPENROUTER_API_KEY: The API key for the OpenRouter API. Get yours from here.

  • CEREBRAS_API_KEY: The API key for the Cerebras API.

  • LOG_DIR: The directory to save the logs to, eg, when using SaverSchema <: AbstractTracerSchema. Defaults to joinpath(pwd(), "log"). Refer to ?SaverSchema for more information on how it works and examples.

Preferences.jl takes priority over ENV variables, so if you set a preference, it will take precedence over the ENV variable.

WARNING: NEVER EVER sync your LocalPreferences.toml file! It contains your API key and other sensitive information!!!

source


# PromptingTools.RESERVED_KWARGSConstant.

The following keywords are reserved for internal use in the ai* functions and cannot be used as placeholders in the Messages

source


# PromptingTools.AICodeType.
julia
AICode(code::AbstractString; auto_eval::Bool=true, safe_eval::Bool=false, 
+    description = "GPT-3.5 Turbo is a 175B parameter model and a common default on the OpenAI API.")

Registering a model alias

julia
PromptingTools.MODEL_ALIASES["gpt3"] = "gpt-3.5-turbo"

source


# PromptingTools.OPENAI_TOKEN_IDS_GPT35_GPT4Constant.

Token IDs for GPT3.5 and GPT4 from https://platform.openai.com/tokenizer

source


# PromptingTools.PREFERENCESConstant.
julia
PREFERENCES

You can set preferences for PromptingTools by setting environment variables or by using set_preferences!. It will create a LocalPreferences.toml file in your current directory and will reload your preferences from there.

Check your preferences by calling get_preferences(key::String).

Available Preferences (for set_preferences!)

  • OPENAI_API_KEY: The API key for the OpenAI API. See OpenAI's documentation for more information.

  • AZURE_OPENAI_API_KEY: The API key for the Azure OpenAI API. See Azure OpenAI's documentation for more information.

  • AZURE_OPENAI_HOST: The host for the Azure OpenAI API. See Azure OpenAI's documentation for more information.

  • MISTRALAI_API_KEY: The API key for the Mistral AI API. See Mistral AI's documentation for more information.

  • COHERE_API_KEY: The API key for the Cohere API. See Cohere's documentation for more information.

  • DATABRICKS_API_KEY: The API key for the Databricks Foundation Model API. See Databricks' documentation for more information.

  • DATABRICKS_HOST: The host for the Databricks API. See Databricks' documentation for more information.

  • TAVILY_API_KEY: The API key for the Tavily Search API. Register here. See more information here.

  • GOOGLE_API_KEY: The API key for Google Gemini models. Get yours from here. If you see a documentation page ("Available languages and regions for Google AI Studio and Gemini API"), it means that it's not yet available in your region.

  • ANTHROPIC_API_KEY: The API key for the Anthropic API. Get yours from here.

  • VOYAGE_API_KEY: The API key for the Voyage API. Free tier is up to 50M tokens! Get yours from here.

  • GROQ_API_KEY: The API key for the Groq API. Free in beta! Get yours from here.

  • DEEPSEEK_API_KEY: The API key for the DeepSeek API. Get 5 credit when you join. Get yours from here.

  • OPENROUTER_API_KEY: The API key for the OpenRouter API. Get yours from here.

  • CEREBRAS_API_KEY: The API key for the Cerebras API. Get yours from here.

  • MODEL_CHAT: The default model to use for aigenerate and most ai* calls. See MODEL_REGISTRY for a list of available models or define your own.

  • MODEL_EMBEDDING: The default model to use for aiembed (embedding documents). See MODEL_REGISTRY for a list of available models or define your own.

  • PROMPT_SCHEMA: The default prompt schema to use for aigenerate and most ai* calls (if not specified in MODEL_REGISTRY). Set as a string, eg, "OpenAISchema". See PROMPT_SCHEMA for more information.

  • MODEL_ALIASES: A dictionary of model aliases (alias => full_model_name). Aliases are used to refer to models by their aliases instead of their full names to make it more convenient to use them. See MODEL_ALIASES for more information.

  • MAX_HISTORY_LENGTH: The maximum length of the conversation history. Defaults to 5. Set to nothing to disable history. See CONV_HISTORY for more information.

  • LOCAL_SERVER: The URL of the local server to use for ai* calls. Defaults to http://localhost:10897/v1. This server is called when you call model="local" See ?LocalServerOpenAISchema for more information and examples.

  • LOG_DIR: The directory to save the logs to, eg, when using SaverSchema <: AbstractTracerSchema. Defaults to joinpath(pwd(), "log"). Refer to ?SaverSchema for more information on how it works and examples.

At the moment it is not possible to persist changes to MODEL_REGISTRY across sessions. Define your register_model!() calls in your startup.jl file to make them available across sessions or put them at the top of your script.

Available ENV Variables

  • OPENAI_API_KEY: The API key for the OpenAI API.

  • AZURE_OPENAI_API_KEY: The API key for the Azure OpenAI API.

  • AZURE_OPENAI_HOST: The host for the Azure OpenAI API. This is the URL built as https://<resource-name>.openai.azure.com.

  • MISTRALAI_API_KEY: The API key for the Mistral AI API.

  • COHERE_API_KEY: The API key for the Cohere API.

  • LOCAL_SERVER: The URL of the local server to use for ai* calls. Defaults to http://localhost:10897/v1. This server is called when you call model="local"

  • DATABRICKS_API_KEY: The API key for the Databricks Foundation Model API.

  • DATABRICKS_HOST: The host for the Databricks API.

  • TAVILY_API_KEY: The API key for the Tavily Search API. Register here. See more information here.

  • GOOGLE_API_KEY: The API key for Google Gemini models. Get yours from here. If you see a documentation page ("Available languages and regions for Google AI Studio and Gemini API"), it means that it's not yet available in your region.

  • ANTHROPIC_API_KEY: The API key for the Anthropic API. Get yours from here.

  • VOYAGE_API_KEY: The API key for the Voyage API. Free tier is up to 50M tokens! Get yours from here.

  • GROQ_API_KEY: The API key for the Groq API. Free in beta! Get yours from here.

  • DEEPSEEK_API_KEY: The API key for the DeepSeek API. Get 5 credit when you join. Get yours from here.

  • OPENROUTER_API_KEY: The API key for the OpenRouter API. Get yours from here.

  • CEREBRAS_API_KEY: The API key for the Cerebras API.

  • LOG_DIR: The directory to save the logs to, eg, when using SaverSchema <: AbstractTracerSchema. Defaults to joinpath(pwd(), "log"). Refer to ?SaverSchema for more information on how it works and examples.

Preferences.jl takes priority over ENV variables, so if you set a preference, it will take precedence over the ENV variable.

WARNING: NEVER EVER sync your LocalPreferences.toml file! It contains your API key and other sensitive information!!!

source


# PromptingTools.RESERVED_KWARGSConstant.

The following keywords are reserved for internal use in the ai* functions and cannot be used as placeholders in the Messages

source


# PromptingTools.AICodeType.
julia
AICode(code::AbstractString; auto_eval::Bool=true, safe_eval::Bool=false, 
 skip_unsafe::Bool=false, capture_stdout::Bool=true, verbose::Bool=false,
 prefix::AbstractString="", suffix::AbstractString="", remove_tests::Bool=false, execution_timeout::Int = 60)
 
@@ -47,7 +47,7 @@
 code.code |> clipboard
 
 # or execute it in the current module (=Main)
-eval(code.expression)

source


# PromptingTools.AIMessageType.
julia
AIMessage

A message type for AI-generated text-based responses. Returned by aigenerate, aiclassify, and aiscan functions.

Fields

  • content::Union{AbstractString, Nothing}: The content of the message.

  • status::Union{Int, Nothing}: The status of the message from the API.

  • name::Union{Nothing, String}: The name of the role in the conversation.

  • tokens::Tuple{Int, Int}: The number of tokens used (prompt,completion).

  • elapsed::Float64: The time taken to generate the response in seconds.

  • cost::Union{Nothing, Float64}: The cost of the API call (calculated with information from MODEL_REGISTRY).

  • log_prob::Union{Nothing, Float64}: The log probability of the response.

  • extras::Union{Nothing, Dict{Symbol, Any}}: A dictionary for additional metadata that is not part of the key message fields. Try to limit to a small number of items and singletons to be serializable.

  • finish_reason::Union{Nothing, String}: The reason the response was finished.

  • run_id::Union{Nothing, Int}: The unique ID of the run.

  • sample_id::Union{Nothing, Int}: The unique ID of the sample (if multiple samples are generated, they will all have the same run_id).

source


# PromptingTools.AITemplateType.
julia
AITemplate

AITemplate is a template for a conversation prompt. This type is merely a container for the template name, which is resolved into a set of messages (=prompt) by render.

Naming Convention

  • Template names should be in CamelCase

  • Follow the format <Persona>...<Variable>... where possible, eg, JudgeIsItTrue

    • Starting with the Persona (=System prompt), eg, Judge = persona is meant to judge some provided information

    • Variable to be filled in with context, eg, It = placeholder it

    • Ending with the variable name is helpful, eg, JuliaExpertTask for a persona to be an expert in Julia language and task is the placeholder name

  • Ideally, the template name should be self-explanatory, eg, JudgeIsItTrue = persona is meant to judge some provided information where it is true or false

Examples

Save time by re-using pre-made templates, just fill in the placeholders with the keyword arguments:

julia
msg = aigenerate(:JuliaExpertAsk; ask = "How do I add packages?")

The above is equivalent to a more verbose version that explicitly uses the dispatch on AITemplate:

julia
msg = aigenerate(AITemplate(:JuliaExpertAsk); ask = "How do I add packages?")

Find available templates with aitemplates:

julia
tmps = aitemplates("JuliaExpertAsk")
+eval(code.expression)

source


# PromptingTools.AIMessageType.
julia
AIMessage

A message type for AI-generated text-based responses. Returned by aigenerate, aiclassify, and aiscan functions.

Fields

  • content::Union{AbstractString, Nothing}: The content of the message.

  • status::Union{Int, Nothing}: The status of the message from the API.

  • name::Union{Nothing, String}: The name of the role in the conversation.

  • tokens::Tuple{Int, Int}: The number of tokens used (prompt,completion).

  • elapsed::Float64: The time taken to generate the response in seconds.

  • cost::Union{Nothing, Float64}: The cost of the API call (calculated with information from MODEL_REGISTRY).

  • log_prob::Union{Nothing, Float64}: The log probability of the response.

  • extras::Union{Nothing, Dict{Symbol, Any}}: A dictionary for additional metadata that is not part of the key message fields. Try to limit to a small number of items and singletons to be serializable.

  • finish_reason::Union{Nothing, String}: The reason the response was finished.

  • run_id::Union{Nothing, Int}: The unique ID of the run.

  • sample_id::Union{Nothing, Int}: The unique ID of the sample (if multiple samples are generated, they will all have the same run_id).

source


# PromptingTools.AITemplateType.
julia
AITemplate

AITemplate is a template for a conversation prompt. This type is merely a container for the template name, which is resolved into a set of messages (=prompt) by render.

Naming Convention

  • Template names should be in CamelCase

  • Follow the format <Persona>...<Variable>... where possible, eg, JudgeIsItTrue

    • Starting with the Persona (=System prompt), eg, Judge = persona is meant to judge some provided information

    • Variable to be filled in with context, eg, It = placeholder it

    • Ending with the variable name is helpful, eg, JuliaExpertTask for a persona to be an expert in Julia language and task is the placeholder name

  • Ideally, the template name should be self-explanatory, eg, JudgeIsItTrue = persona is meant to judge some provided information where it is true or false

Examples

Save time by re-using pre-made templates, just fill in the placeholders with the keyword arguments:

julia
msg = aigenerate(:JuliaExpertAsk; ask = "How do I add packages?")

The above is equivalent to a more verbose version that explicitly uses the dispatch on AITemplate:

julia
msg = aigenerate(AITemplate(:JuliaExpertAsk); ask = "How do I add packages?")

Find available templates with aitemplates:

julia
tmps = aitemplates("JuliaExpertAsk")
 # Will surface one specific template
 # 1-element Vector{AITemplateMetadata}:
 # PromptingTools.AITemplateMetadata
@@ -62,14 +62,14 @@
 {{ask}}"
 #   source: String ""

The above gives you a good idea of what the template is about, what placeholders are available, and how much it would cost to use it (=wordcount).

Search for all Julia-related templates:

julia
tmps = aitemplates("Julia")
 # 2-element Vector{AITemplateMetadata}... -> more to come later!

If you are on VSCode, you can leverage nice tabular display with vscodedisplay:

julia
using DataFrames
-tmps = aitemplates("Julia") |> DataFrame |> vscodedisplay

I have my selected template, how do I use it? Just use the "name" in aigenerate or aiclassify like you see in the first example!

You can inspect any template by "rendering" it (this is what the LLM will see):

julia
julia> AITemplate(:JudgeIsItTrue) |> PromptingTools.render

See also: save_template, load_template, load_templates! for more advanced use cases (and the corresponding script in examples/ folder)

source


# PromptingTools.AITemplateMetadataType.

Helper for easy searching and reviewing of templates. Defined on loading of each template.

source


# PromptingTools.AbstractPromptSchemaType.

Defines different prompting styles based on the model training and fine-tuning.

source


# PromptingTools.AbstractToolType.
julia
AbstractTool

Abstract type for all tool types.

Required fields:

  • name::String: The name of the tool.

  • parameters::Dict: The parameters of the tool.

  • description::Union{String, Nothing}: The description of the tool.

  • callable::Any: The callable object of the tool, eg, a type or a function.

source


# PromptingTools.AnthropicSchemaType.
julia
AnthropicSchema <: AbstractAnthropicSchema

AnthropicSchema is the default schema for Anthropic API models (eg, Claude). See more information here.

It uses the following conversation template:

[Dict(role="user",content="..."),Dict(role="assistant",content="...")]

system messages are provided as a keyword argument to the API call.

It's recommended to separate sections in your prompt with XML markup (e.g. <document> </document>). See here.

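For instance, a small sketch of that recommendation (assumes the "claudeh" model alias used elsewhere in these docs and a configured ANTHROPIC_API_KEY; the text is illustrative):

julia
msg = aigenerate("Summarize the text in <document>{{text}}</document>";
    text = "Julia is a high-level, dynamic programming language.", model = "claudeh")
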
source


# PromptingTools.AzureOpenAISchemaType.

AzureOpenAISchema

AzureOpenAISchema() allows user to call Azure OpenAI API. API Reference

Requires two environment variables to be set:

  • AZURE_OPENAI_API_KEY: Azure token

  • AZURE_OPENAI_HOST: Address of the Azure resource ("https://<resource>.openai.azure.com")

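A minimal usage sketch, assuming both variables are already set and that your Azure deployment name is passed as the model (the name "my-deployment" is a placeholder):

julia
# AZURE_OPENAI_API_KEY and AZURE_OPENAI_HOST must be set in the environment
msg = aigenerate(AzureOpenAISchema(), "Say hi!"; model = "my-deployment")
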
source


# PromptingTools.CerebrasOpenAISchemaType.
julia
CerebrasOpenAISchema

Schema to call the Cerebras API.

Links:

Requires one environment variable to be set:

  • CEREBRAS_API_KEY: Your API key

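A minimal usage sketch, assuming CEREBRAS_API_KEY is set; the model name below is illustrative and must match a model actually offered by Cerebras:

julia
msg = aigenerate(CerebrasOpenAISchema(), "Say hi!"; model = "llama3.1-8b")
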
source


# PromptingTools.ChatMLSchemaType.

ChatMLSchema is used by many open-source chatbots, by OpenAI models (under the hood) and by several models and interfaces (eg, Ollama, vLLM).

You can explore it on tiktokenizer

It uses the following conversation structure:

<|im_start|>system
+tmps = aitemplates("Julia") |> DataFrame |> vscodedisplay

I have my selected template, how do I use it? Just use the "name" in aigenerate or aiclassify like you see in the first example!

You can inspect any template by "rendering" it (this is what the LLM will see):

julia
julia> AITemplate(:JudgeIsItTrue) |> PromptingTools.render

See also: save_template, load_template, load_templates! for more advanced use cases (and the corresponding script in examples/ folder)

source


# PromptingTools.AITemplateMetadataType.

Helper for easy searching and reviewing of templates. Defined on loading of each template.

source


# PromptingTools.AbstractPromptSchemaType.

Defines different prompting styles based on the model training and fine-tuning.

source


# PromptingTools.AbstractToolType.
julia
AbstractTool

Abstract type for all tool types.

Required fields:

  • name::String: The name of the tool.

  • parameters::Dict: The parameters of the tool.

  • description::Union{String, Nothing}: The description of the tool.

  • callable::Any: The callable object of the tool, eg, a type or a function.

source


# PromptingTools.AnthropicSchemaType.
julia
AnthropicSchema <: AbstractAnthropicSchema

AnthropicSchema is the default schema for Anthropic API models (eg, Claude). See more information here.

It uses the following conversation template:

[Dict(role="user",content="..."),Dict(role="assistant",content="...")]

system messages are provided as a keyword argument to the API call.

It's recommended to separate sections in your prompt with XML markup (e.g. <document> </document>). See here.

source


# PromptingTools.AzureOpenAISchemaType.

AzureOpenAISchema

AzureOpenAISchema() allows user to call Azure OpenAI API. API Reference

Requires two environment variables to be set:

  • AZURE_OPENAI_API_KEY: Azure token

  • AZURE_OPENAI_HOST: Address of the Azure resource ("https://<resource>.openai.azure.com")

source


# PromptingTools.CerebrasOpenAISchemaType.
julia
CerebrasOpenAISchema

Schema to call the Cerebras API.

Links:

Requires one environment variable to be set:

  • CEREBRAS_API_KEY: Your API key

source


# PromptingTools.ChatMLSchemaType.

ChatMLSchema is used by many open-source chatbots, by OpenAI models (under the hood) and by several models and interfaces (eg, Ollama, vLLM).

You can explore it on tiktokenizer

It uses the following conversation structure:

<|im_start|>system
 ...<|im_end|>
 <|im_start|>user
 ...<|im_end|>
 <|im_start|>assistant
-...<|im_end|>

source


# PromptingTools.CustomOpenAISchemaType.
julia
CustomOpenAISchema

CustomOpenAISchema() allows user to call any OpenAI-compatible API.

All the user needs to do is pass this schema as the first argument and provide the BASE URL of the API to call (api_kwargs.url).

Example

Assumes that we have a local server running at http://127.0.0.1:8081:

julia
api_key = "..."
+...<|im_end|>

source


# PromptingTools.CustomOpenAISchemaType.
julia
CustomOpenAISchema

CustomOpenAISchema() allows user to call any OpenAI-compatible API.

All the user needs to do is pass this schema as the first argument and provide the BASE URL of the API to call (api_kwargs.url).

Example

Assumes that we have a local server running at http://127.0.0.1:8081:

julia
api_key = "..."
 prompt = "Say hi!"
-msg = aigenerate(CustomOpenAISchema(), prompt; model="my_model", api_key, api_kwargs=(; url="http://127.0.0.1:8081"))

source


# PromptingTools.DataMessageType.
julia
DataMessage

A message type for AI-generated data-based responses, ie, different content than text. Returned by aiextract and aiembed functions.

Fields

  • content::Union{AbstractString, Nothing}: The content of the message.

  • status::Union{Int, Nothing}: The status of the message from the API.

  • tokens::Tuple{Int, Int}: The number of tokens used (prompt,completion).

  • elapsed::Float64: The time taken to generate the response in seconds.

  • cost::Union{Nothing, Float64}: The cost of the API call (calculated with information from MODEL_REGISTRY).

  • log_prob::Union{Nothing, Float64}: The log probability of the response.

  • extras::Union{Nothing, Dict{Symbol, Any}}: A dictionary for additional metadata that is not part of the key message fields. Try to limit to a small number of items and singletons to be serializable.

  • finish_reason::Union{Nothing, String}: The reason the response was finished.

  • run_id::Union{Nothing, Int}: The unique ID of the run.

  • sample_id::Union{Nothing, Int}: The unique ID of the sample (if multiple samples are generated, they will all have the same run_id).

source


# PromptingTools.DatabricksOpenAISchemaType.
julia
DatabricksOpenAISchema

DatabricksOpenAISchema() allows user to call Databricks Foundation Model API. API Reference

Requires two environment variables to be set:

  • DATABRICKS_API_KEY: Databricks token

  • DATABRICKS_HOST: Address of the Databricks workspace (https://<workspace_host>.databricks.com)

source


# PromptingTools.DeepSeekOpenAISchemaType.
julia
DeepSeekOpenAISchema

Schema to call the DeepSeek API.

Links:

Requires one environment variable to be set:

  • DEEPSEEK_API_KEY: Your API key (often starts with "sk-...")

source


# PromptingTools.FireworksOpenAISchemaType.
julia
FireworksOpenAISchema

Schema to call the Fireworks.ai API.

Links:

Requires one environment variable to be set:

  • FIREWORKS_API_KEY: Your API key

source


# PromptingTools.GoogleSchemaType.

Calls Google's Gemini API. See more information here. It's available only for some regions.

source


# PromptingTools.GroqOpenAISchemaType.
julia
GroqOpenAISchema

Schema to call the groq.com API.

Links:

Requires one environment variable to be set:

  • GROQ_API_KEY: Your API key (often starts with "gsk_...")

source


# PromptingTools.ItemsExtractType.

Extract zero, one or more specified items from the provided data.

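A small sketch of how it is typically used as a return_type wrapper for aiextract (the Friend struct and prompt are illustrative; assumes a configured OpenAI API key):

julia
using PromptingTools

struct Friend
    name::String
    age::Int
end

# Wrapping the struct in ItemsExtract lets the model return zero, one or more entries
msg = aiextract("James is 30 and his friend Anna is 28.";
    return_type = PromptingTools.ItemsExtract{Friend})
msg.content.items  # Vector{Friend} with the extracted entries
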
source


# PromptingTools.LocalServerOpenAISchemaType.
julia
LocalServerOpenAISchema

Designed to be used with local servers. It's automatically called with model alias "local" (see MODEL_REGISTRY).

This schema is a flavor of CustomOpenAISchema with a url key preset by the global Preference key LOCAL_SERVER. See ?PREFERENCES for more details on how to change it. It assumes that the server follows OpenAI API conventions (eg, POST /v1/chat/completions).

Note: Llama.cpp (and hence Llama.jl built on top of it) do NOT support embeddings endpoint! You'll get an address error.

Example

Assumes that we have a local server running at http://127.0.0.1:10897/v1 (port and address used by Llama.jl, "v1" at the end is needed for OpenAI endpoint compatibility):

Three ways to call it:

julia

+msg = aigenerate(CustomOpenAISchema(), prompt; model="my_model", api_key, api_kwargs=(; url="http://127.0.0.1:8081"))

source


# PromptingTools.DataMessageType.
julia
DataMessage

A message type for AI-generated data-based responses, ie, different content than text. Returned by aiextract and aiembed functions.

Fields

  • content::Union{AbstractString, Nothing}: The content of the message.

  • status::Union{Int, Nothing}: The status of the message from the API.

  • tokens::Tuple{Int, Int}: The number of tokens used (prompt,completion).

  • elapsed::Float64: The time taken to generate the response in seconds.

  • cost::Union{Nothing, Float64}: The cost of the API call (calculated with information from MODEL_REGISTRY).

  • log_prob::Union{Nothing, Float64}: The log probability of the response.

  • extras::Union{Nothing, Dict{Symbol, Any}}: A dictionary for additional metadata that is not part of the key message fields. Try to limit to a small number of items and singletons to be serializable.

  • finish_reason::Union{Nothing, String}: The reason the response was finished.

  • run_id::Union{Nothing, Int}: The unique ID of the run.

  • sample_id::Union{Nothing, Int}: The unique ID of the sample (if multiple samples are generated, they will all have the same run_id).

source


# PromptingTools.DatabricksOpenAISchemaType.
julia
DatabricksOpenAISchema

DatabricksOpenAISchema() allows user to call Databricks Foundation Model API. API Reference

Requires two environment variables to be set:

  • DATABRICKS_API_KEY: Databricks token

  • DATABRICKS_HOST: Address of the Databricks workspace (https://<workspace_host>.databricks.com)

source


# PromptingTools.DeepSeekOpenAISchemaType.
julia
DeepSeekOpenAISchema

Schema to call the DeepSeek API.

Links:

Requires one environment variable to be set:

  • DEEPSEEK_API_KEY: Your API key (often starts with "sk-...")

source


# PromptingTools.FireworksOpenAISchemaType.
julia
FireworksOpenAISchema

Schema to call the Fireworks.ai API.

Links:

Requires one environment variable to be set:

  • FIREWORKS_API_KEY: Your API key

source


# PromptingTools.GoogleSchemaType.

Calls Google's Gemini API. See more information here. It's available only for some regions.

source


# PromptingTools.GroqOpenAISchemaType.
julia
GroqOpenAISchema

Schema to call the groq.com API.

Links:

Requires one environment variable to be set:

  • GROQ_API_KEY: Your API key (often starts with "gsk_...")

source


# PromptingTools.ItemsExtractType.

Extract zero, one or more specified items from the provided data.

source


# PromptingTools.LocalServerOpenAISchemaType.
julia
LocalServerOpenAISchema

Designed to be used with local servers. It's automatically called with model alias "local" (see MODEL_REGISTRY).

This schema is a flavor of CustomOpenAISchema with a url key preset by the global Preference key LOCAL_SERVER. See ?PREFERENCES for more details on how to change it. It assumes that the server follows OpenAI API conventions (eg, POST /v1/chat/completions).

Note: Llama.cpp (and hence Llama.jl built on top of it) do NOT support embeddings endpoint! You'll get an address error.

Example

Assumes that we have a local server running at http://127.0.0.1:10897/v1 (port and address used by Llama.jl, "v1" at the end is needed for OpenAI endpoint compatibility):

Three ways to call it:

julia

 # Use @ai_str with "local" alias
 ai"Say hi!"local
 
@@ -86,8 +86,8 @@
 
 # Or if it's a temporary fix, just change the variable `LOCAL_SERVER`:
 const PT = PromptingTools
-PT.LOCAL_SERVER = "http://127.0.0.1:10897/v1"

source


# PromptingTools.MaybeExtractType.

Extract a result from the provided data, if any, otherwise set the error and message fields.

Arguments

  • error::Bool: true if no result could be extracted, false otherwise.

  • message::String: Only present if no result is found, should be short and concise.

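A small sketch of how it is typically used as a return_type wrapper for aiextract (the MyMeasurement struct and prompt are illustrative; assumes a configured OpenAI API key):

julia
using PromptingTools

struct MyMeasurement
    age::Int
end

# Wrapping the struct in MaybeExtract lets the model signal that nothing could be extracted
msg = aiextract("Soil moisture was average today.";
    return_type = PromptingTools.MaybeExtract{MyMeasurement})
msg.content.error    # true -> no measurement found
msg.content.message  # short explanation from the model
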
source


# PromptingTools.MistralOpenAISchemaType.
julia
MistralOpenAISchema

MistralOpenAISchema() allows user to call MistralAI API known for mistral and mixtral models.

It's a flavor of CustomOpenAISchema() with a url preset to https://api.mistral.ai.

Most models have been registered, so you don't even have to specify the schema

Example

Let's call mistral-tiny model:

julia
api_key = "..." # can be set via ENV["MISTRAL_API_KEY"] or via our preference system
-msg = aigenerate("Say hi!"; model="mistral_tiny", api_key)

See ?PREFERENCES for more details on how to set your API key permanently.

source


# PromptingTools.ModelSpecType.
julia
ModelSpec

A struct that contains information about a model, such as its name, schema, cost per token, etc.

Fields

  • name::String: The name of the model. This is the name that will be used to refer to the model in the ai* functions.

  • schema::AbstractPromptSchema: The schema of the model. This is the schema that will be used to generate prompts for the model, eg, :OpenAISchema.

  • cost_of_token_prompt::Float64: The cost of 1 token in the prompt for this model. This is used to calculate the cost of a prompt. Note: It is often provided online as cost per 1000 tokens, so make sure to convert it correctly!

  • cost_of_token_generation::Float64: The cost of 1 token generated by this model. This is used to calculate the cost of a generation. Note: It is often provided online as cost per 1000 tokens, so make sure to convert it correctly!

  • description::String: A description of the model. This is used to provide more information about the model when it is queried.

Example

julia
spec = ModelSpec("gpt-3.5-turbo",
+PT.LOCAL_SERVER = "http://127.0.0.1:10897/v1"

source


# PromptingTools.MaybeExtractType.

Extract a result from the provided data, if any, otherwise set the error and message fields.

Arguments

  • error::Bool: true if no result could be extracted, false otherwise.

  • message::String: Only present if no result is found, should be short and concise.

source


# PromptingTools.MistralOpenAISchemaType.
julia
MistralOpenAISchema

MistralOpenAISchema() allows user to call MistralAI API known for mistral and mixtral models.

It's a flavor of CustomOpenAISchema() with a url preset to https://api.mistral.ai.

Most models have been registered, so you don't even have to specify the schema

Example

Let's call mistral-tiny model:

julia
api_key = "..." # can be set via ENV["MISTRAL_API_KEY"] or via our preference system
+msg = aigenerate("Say hi!"; model="mistral_tiny", api_key)

See ?PREFERENCES for more details on how to set your API key permanently.

source


# PromptingTools.ModelSpecType.
julia
ModelSpec

A struct that contains information about a model, such as its name, schema, cost per token, etc.

Fields

  • name::String: The name of the model. This is the name that will be used to refer to the model in the ai* functions.

  • schema::AbstractPromptSchema: The schema of the model. This is the schema that will be used to generate prompts for the model, eg, :OpenAISchema.

  • cost_of_token_prompt::Float64: The cost of 1 token in the prompt for this model. This is used to calculate the cost of a prompt. Note: It is often provided online as cost per 1000 tokens, so make sure to convert it correctly!

  • cost_of_token_generation::Float64: The cost of 1 token generated by this model. This is used to calculate the cost of a generation. Note: It is often provided online as cost per 1000 tokens, so make sure to convert it correctly!

  • description::String: A description of the model. This is used to provide more information about the model when it is queried.

Example

julia
spec = ModelSpec("gpt-3.5-turbo",
     OpenAISchema(),
     0.0015,
     0.002,
@@ -99,7 +99,7 @@
     schema = OpenAISchema(),
     cost_of_token_prompt = 0.0015,
     cost_of_token_generation = 0.002,
-    description = "GPT-3.5 Turbo is a 175B parameter model and a common default on the OpenAI API.")

source


# PromptingTools.NoSchemaType.

Schema that keeps messages (<:AbstractMessage) and does not transform them for any specific model. It is used by the first pass of the prompt rendering system (see ?render).

source


# PromptingTools.OllamaManagedSchemaType.

Ollama by default manages different models and their associated prompt schemas when you pass system_prompt and prompt fields to the API.

Warning: It works only for 1 system message and 1 user message, so anything more than that has to be rejected.

If you need to pass more messages or a longer conversational history, you can define the model-specific schema directly and pass your Ollama requests with raw=true, which disables templating and schema management by Ollama.

source


# PromptingTools.OllamaSchemaType.

OllamaSchema is the default schema for Ollama models.

It uses the following conversation template:

[Dict(role="system",content="..."),Dict(role="user",content="..."),Dict(role="assistant",content="...")]

It's very similar to OpenAISchema, but it appends images differently.

source


# PromptingTools.OpenAISchemaType.

OpenAISchema is the default schema for OpenAI models.

It uses the following conversation template:

[Dict(role="system",content="..."),Dict(role="user",content="..."),Dict(role="assistant",content="...")]

It's recommended to separate sections in your prompt with markdown headers (e.g. ## Answer).

source


# PromptingTools.OpenRouterOpenAISchemaType.
julia
OpenRouterOpenAISchema

Schema to call the OpenRouter API.

Links:

Requires one environment variable to be set:

  • OPENROUTER_API_KEY: Your API key

source


# PromptingTools.SaverSchemaType.
julia
SaverSchema <: AbstractTracerSchema

SaverSchema is a schema that automatically saves the conversation to the disk. It's useful for debugging and for persistent logging.

It can be composed with any other schema, eg, TracerSchema to save additional metadata.

Set environment variable LOG_DIR to the directory where you want to save the conversation (see ?PREFERENCES). Conversations are named by the hash of the first message in the conversation to naturally group subsequent conversations together.

If you need to provide the logging directory or the file name dynamically, you can provide the following arguments to tracer_kwargs:

  • log_dir - used as the directory to save the log into when provided. Defaults to LOG_DIR if not provided.

  • log_file_path - used as the file name to save the log into when provided. This value overrules the log_dir and LOG_DIR if provided.

To use it automatically, re-register the models you use with the schema wrapped in SaverSchema

See also: meta, unwrap, TracerSchema, initialize_tracer, finalize_tracer

Example

julia
using PromptingTools: TracerSchema, OpenAISchema, SaverSchema
+    description = "GPT-3.5 Turbo is a 175B parameter model and a common default on the OpenAI API.")

source


# PromptingTools.NoSchemaType.

Schema that keeps messages (<:AbstractMessage) and does not transform them for any specific model. It is used by the first pass of the prompt rendering system (see ?render).

source


# PromptingTools.OllamaManagedSchemaType.

Ollama by default manages different models and their associated prompt schemas when you pass system_prompt and prompt fields to the API.

Warning: It works only for 1 system message and 1 user message, so anything more than that has to be rejected.

If you need to pass more messages or a longer conversational history, you can define the model-specific schema directly and pass your Ollama requests with raw=true, which disables templating and schema management by Ollama.

source


# PromptingTools.OllamaSchemaType.

OllamaSchema is the default schema for Ollama models.

It uses the following conversation template:

[Dict(role="system",content="..."),Dict(role="user",content="..."),Dict(role="assistant",content="...")]

It's very similar to OpenAISchema, but it appends images differently.

source


# PromptingTools.OpenAISchemaType.

OpenAISchema is the default schema for OpenAI models.

It uses the following conversation template:

[Dict(role="system",content="..."),Dict(role="user",content="..."),Dict(role="assistant",content="...")]

It's recommended to separate sections in your prompt with markdown headers (e.g. ## Answer).

source


# PromptingTools.OpenRouterOpenAISchemaType.
julia
OpenRouterOpenAISchema

Schema to call the OpenRouter API.

Links:

Requires one environment variable to be set:

  • OPENROUTER_API_KEY: Your API key

source


# PromptingTools.SaverSchemaType.
julia
SaverSchema <: AbstractTracerSchema

SaverSchema is a schema that automatically saves the conversation to the disk. It's useful for debugging and for persistent logging.

It can be composed with any other schema, eg, TracerSchema to save additional metadata.

Set environment variable LOG_DIR to the directory where you want to save the conversation (see ?PREFERENCES). Conversations are named by the hash of the first message in the conversation to naturally group subsequent conversations together.

If you need to provide the logging directory or the file name dynamically, you can provide the following arguments to tracer_kwargs:

  • log_dir - used as the directory to save the log into when provided. Defaults to LOG_DIR if not provided.

  • log_file_path - used as the file name to save the log into when provided. This value overrules the log_dir and LOG_DIR if provided.

To use it automatically, re-register the models you use with the schema wrapped in SaverSchema

See also: meta, unwrap, TracerSchema, initialize_tracer, finalize_tracer

Example

julia
using PromptingTools: TracerSchema, OpenAISchema, SaverSchema
 # This schema will first trace the metadata (change to TraceMessage) and then save the conversation to the disk
 
 wrap_schema = OpenAISchema() |> TracerSchema |> SaverSchema
@@ -108,7 +108,7 @@
 
 # conv is a vector of messages that will be saved to a JSON together with metadata about the template and api_kwargs

If you wanted to enable this automatically for models you use, you can do it like this:

julia
PT.register_model!(; name= "gpt-3.5-turbo", schema=OpenAISchema() |> TracerSchema |> SaverSchema)

Any subsequent calls with model="gpt-3.5-turbo" will automatically capture metadata and save the conversation to the disk.

To provide the logging file path explicitly, use the tracer_kwargs:

julia
conv = aigenerate(wrap_schema,:BlankSystemUser; system="You're a French-speaking assistant!",
     user="Say hi!", model="gpt-4", api_kwargs=(;temperature=0.1), return_all=true,
-    tracer_kwargs=(; log_file_path="my_logs/my_log.json"))

source


# PromptingTools.ShareGPTSchemaType.
julia
ShareGPTSchema <: AbstractShareGPTSchema

A frequently used schema for finetuning LLMs. Conversations are recorded as a vector of dicts with keys "from" and "value" (similar to OpenAI).

source


# PromptingTools.StreamCallbackType.
julia
StreamCallback

The simplest callback for streaming messages; it simply prints the content to the output stream defined by out. When streaming is over, it builds the response body from the chunks and returns it as if it were a normal response from the API.

For more complex use cases, you can define your own callback. See the interface description below for more information.

Fields

  • out: The output stream, eg, stdout or a pipe.

  • flavor: The stream flavor which might or might not differ between different providers, eg, OpenAIStream or AnthropicStream.

  • chunks: The list of received StreamChunk chunks.

  • verbose: Whether to print verbose information. If you enable DEBUG logging, you will see the chunks as they come in.

  • throw_on_error: Whether to throw an error if an error message is detected in the streaming response.

  • kwargs: Any custom keyword arguments required for your use case.

Interface

  • StreamCallback(; kwargs...): Constructor for the StreamCallback object.

  • streamed_request!(cb, url, headers, input): End-to-end wrapper for POST streaming requests.

streamed_request! is composed of:

  • extract_chunks(flavor, blob): Extract the chunks from the received SSE blob. Returns a list of StreamChunk and the next spillover (if message was incomplete).

  • callback(cb, chunk): Process the chunk to be printed

    • extract_content(flavor, chunk): Extract the content from the chunk.

    • print_content(out, text): Print the content to the output stream.

  • is_done(flavor, chunk): Check if the stream is done.

  • build_response_body(flavor, cb): Build the response body from the chunks to mimic receiving a standard response from the API.

If you want to implement your own callback, you can create your own methods for the interface functions. Eg, if you want to print the streamed chunks into some specialized sink or Channel, you could define a simple method just for print_content.
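
As a sketch of that idea, assuming you want streamed fragments pushed into a Channel instead of printed (the Channel-based method below is hypothetical and only mirrors the print_content(out, text) interface described above):

julia
using PromptingTools
const PT = PromptingTools

# Hypothetical sink: push each streamed text fragment into a Channel
function PT.print_content(out::Channel{String}, text::AbstractString; kwargs...)
    put!(out, String(text))
    return nothing
end

chunk_channel = Channel{String}(100)
streamcallback = PT.StreamCallback(; out = chunk_channel)
msg = aigenerate("Count from 1 to 10."; streamcallback)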

Example

julia
using PromptingTools
+    tracer_kwargs=(; log_file_path="my_logs/my_log.json"))

source


# PromptingTools.ShareGPTSchemaType.
julia
ShareGPTSchema <: AbstractShareGPTSchema

A frequently used schema for finetuning LLMs. Conversations are recorded as a vector of dicts with keys "from" and "value" (similar to OpenAI).

source


# PromptingTools.StreamCallbackType.
julia
StreamCallback

The simplest callback for streaming messages; it simply prints the content to the output stream defined by out. When streaming is over, it builds the response body from the chunks and returns it as if it were a normal response from the API.

For more complex use cases, you can define your own callback. See the interface description below for more information.

Fields

  • out: The output stream, eg, stdout or a pipe.

  • flavor: The stream flavor which might or might not differ between different providers, eg, OpenAIStream or AnthropicStream.

  • chunks: The list of received StreamChunk chunks.

  • verbose: Whether to print verbose information. If you enable DEBUG logging, you will see the chunks as they come in.

  • throw_on_error: Whether to throw an error if an error message is detected in the streaming response.

  • kwargs: Any custom keyword arguments required for your use case.

Interface

  • StreamCallback(; kwargs...): Constructor for the StreamCallback object.

  • streamed_request!(cb, url, headers, input): End-to-end wrapper for POST streaming requests.

streamed_request! is composed of:

  • extract_chunks(flavor, blob): Extract the chunks from the received SSE blob. Returns a list of StreamChunk and the next spillover (if message was incomplete).

  • callback(cb, chunk): Process the chunk to be printed

    • extract_content(flavor, chunk): Extract the content from the chunk.

    • print_content(out, text): Print the content to the output stream.

  • is_done(flavor, chunk): Check if the stream is done.

  • build_response_body(flavor, cb): Build the response body from the chunks to mimic receiving a standard response from the API.

If you want to implement your own callback, you can create your own methods for the interface functions. Eg, if you want to print the streamed chunks into some specialized sink or Channel, you could define a simple method just for print_content.

Example

julia
using PromptingTools
 const PT = PromptingTools
 
 # Simplest usage, just provide where to steam the text (we build the callback for you)
@@ -120,30 +120,30 @@
 
 # Get verbose output with details of each chunk for debugging
 streamcallback = PT.StreamCallback(; verbose=true, throw_on_error=true)
-msg = aigenerate("Count from 1 to 10."; streamcallback)

Note: If you provide a StreamCallback object to aigenerate, we will configure it and necessary api_kwargs via configure_callback! unless you specify the flavor field. If you provide a StreamCallback with a specific flavor, we leave all configuration to the user (eg, you need to provide the correct api_kwargs).

source


# PromptingTools.StreamChunkType.
julia
StreamChunk

A chunk of streaming data. A message is composed of multiple chunks.

Fields

  • event: The event name.

  • data: The data chunk.

  • json: The JSON object or nothing if the chunk does not contain JSON.

source


# PromptingTools.TestEchoAnthropicSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoGoogleSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoOllamaManagedSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoOllamaSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoOpenAISchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TogetherOpenAISchemaType.
julia
TogetherOpenAISchema

Schema to call the Together.ai API.

Links:

Requires one environment variable to be set:

  • TOGETHER_API_KEY: Your API key

source


# PromptingTools.ToolType.
julia
Tool

A tool that can be sent to an LLM for execution ("function calling").

Arguments

  • name::String: The name of the tool.

  • parameters::Dict: The parameters of the tool.

  • description::Union{String, Nothing}: The description of the tool.

  • strict::Union{Bool, Nothing}: Whether to enforce strict mode for the tool.

  • callable::Any: The callable object of the tool, eg, a type or a function.

See also: AbstractTool, tool_call_signature

source


# PromptingTools.ToolMethod.
julia
Tool(callable::Union{Function, Type, Method}; kwargs...)

Create a Tool from a callable object (function, type, or method).

Arguments

  • callable::Union{Function, Type, Method}: The callable object to convert to a tool.

Returns

  • Tool: A tool object that can be used for function calling.

Examples

julia
# Create a tool from a function
+msg = aigenerate("Count from 1 to 10."; streamcallback)

Note: If you provide a StreamCallback object to aigenerate, we will configure it and necessary api_kwargs via configure_callback! unless you specify the flavor field. If you provide a StreamCallback with a specific flavor, we leave all configuration to the user (eg, you need to provide the correct api_kwargs).

source


# PromptingTools.StreamChunkType.
julia
StreamChunk

A chunk of streaming data. A message is composed of multiple chunks.

Fields

  • event: The event name.

  • data: The data chunk.

  • json: The JSON object or nothing if the chunk does not contain JSON.

source


# PromptingTools.TestEchoAnthropicSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoGoogleSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoOllamaManagedSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoOllamaSchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TestEchoOpenAISchemaType.

Echoes the user's input back to them. Used for testing the implementation

source


# PromptingTools.TogetherOpenAISchemaType.
julia
TogetherOpenAISchema

Schema to call the Together.ai API.

Links:

Requires one environment variable to be set:

  • TOGETHER_API_KEY: Your API key

source


# PromptingTools.ToolType.
julia
Tool

A tool that can be sent to an LLM for execution ("function calling").

Arguments

  • name::String: The name of the tool.

  • parameters::Dict: The parameters of the tool.

  • description::Union{String, Nothing}: The description of the tool.

  • strict::Union{Bool, Nothing}: Whether to enforce strict mode for the tool.

  • callable::Any: The callable object of the tool, eg, a type or a function.

See also: AbstractTool, tool_call_signature

source


# PromptingTools.ToolMethod.
julia
Tool(callable::Union{Function, Type, Method}; kwargs...)

Create a Tool from a callable object (function, type, or method).

Arguments

  • callable::Union{Function, Type, Method}: The callable object to convert to a tool.

Returns

  • Tool: A tool object that can be used for function calling.

Examples

julia
# Create a tool from a function
 tool = Tool(my_function)
 
 # Create a tool from a type
-tool = Tool(MyStruct)

source


# PromptingTools.TracerMessageType.
julia
TracerMessage{T <: Union{AbstractChatMessage, AbstractDataMessage}} <: AbstractTracerMessage

A mutable wrapper message designed for tracing the flow of messages through the system, allowing for iterative updates and providing additional metadata for observability.

Fields

  • object::T: The original message being traced, which can be either a chat or data message.

  • from::Union{Nothing, Symbol}: The identifier of the sender of the message.

  • to::Union{Nothing, Symbol}: The identifier of the intended recipient of the message.

  • viewers::Vector{Symbol}: A list of identifiers for entities that have access to view the message, in addition to the sender and recipient.

  • time_received::DateTime: The timestamp when the message was received by the tracing system.

  • time_sent::Union{Nothing, DateTime}: The timestamp when the message was originally sent, if available.

  • model::String: The name of the model that generated the message. Defaults to empty.

  • parent_id::Symbol: An identifier for the job or process that the message is associated with. Higher-level tracing ID.

  • thread_id::Symbol: An identifier for the thread (series of messages for one model/agent) or execution context within the job where the message originated. It should be the same for messages in the same thread.

  • meta::Union{Nothing, Dict{Symbol, Any}}: A dictionary for additional metadata that is not part of the message itself. Try to limit to a small number of items and singletons to be serializable.

  • _type::Symbol: A fixed symbol identifying the type of the message as :eventmessage, used for type discrimination.

This structure is particularly useful for debugging, monitoring, and auditing the flow of messages in systems that involve complex interactions or asynchronous processing.

All fields are optional besides the object.

Useful methods: pprint (pretty-prints the underlying message), unwrap (to get the object out of the tracer), align_tracer! (to set all shared IDs in a vector of tracers to the same value), istracermessage (to check if a given message is an AbstractTracerMessage).

Example

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())
+tool = Tool(MyStruct)

source


# PromptingTools.TracerMessageType.
julia
TracerMessage{T <: Union{AbstractChatMessage, AbstractDataMessage}} <: AbstractTracerMessage

A mutable wrapper message designed for tracing the flow of messages through the system, allowing for iterative updates and providing additional metadata for observability.

Fields

  • object::T: The original message being traced, which can be either a chat or data message.

  • from::Union{Nothing, Symbol}: The identifier of the sender of the message.

  • to::Union{Nothing, Symbol}: The identifier of the intended recipient of the message.

  • viewers::Vector{Symbol}: A list of identifiers for entities that have access to view the message, in addition to the sender and recipient.

  • time_received::DateTime: The timestamp when the message was received by the tracing system.

  • time_sent::Union{Nothing, DateTime}: The timestamp when the message was originally sent, if available.

  • model::String: The name of the model that generated the message. Defaults to empty.

  • parent_id::Symbol: An identifier for the job or process that the message is associated with. Higher-level tracing ID.

  • thread_id::Symbol: An identifier for the thread (series of messages for one model/agent) or execution context within the job where the message originated. It should be the same for messages in the same thread.

  • meta::Union{Nothing, Dict{Symbol, Any}}: A dictionary for additional metadata that is not part of the message itself. Try to limit to a small number of items and singletons to be serializable.

  • _type::Symbol: A fixed symbol identifying the type of the message as :eventmessage, used for type discrimination.

This structure is particularly useful for debugging, monitoring, and auditing the flow of messages in systems that involve complex interactions or asynchronous processing.

All fields are optional besides the object.

Useful methods: pprint (pretty-prints the underlying message), unwrap (to get the object out of the tracer), align_tracer! (to set all shared IDs in a vector of tracers to the same value), istracermessage (to check if a given message is an AbstractTracerMessage).

Example

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())
 msg = aigenerate(wrap_schema, "Say hi!"; model = "gpt4t")
 msg # isa TracerMessage
-msg.content # access content as if it were the message

source


# PromptingTools.TracerMessageLikeType.
julia
TracerMessageLike{T <: Any} <: AbstractTracer

A mutable structure designed for general-purpose tracing within the system, capable of handling any type of object that is part of the AI Conversation. It provides a flexible way to track and annotate objects as they move through different parts of the system, facilitating debugging, monitoring, and auditing.

Fields

  • object::T: The original object being traced.

  • from::Union{Nothing, Symbol}: The identifier of the sender or origin of the object.

  • to::Union{Nothing, Symbol}: The identifier of the intended recipient or destination of the object.

  • viewers::Vector{Symbol}: A list of identifiers for entities that have access to view the object, in addition to the sender and recipient.

  • time_received::DateTime: The timestamp when the object was received by the tracing system.

  • time_sent::Union{Nothing, DateTime}: The timestamp when the object was originally sent, if available.

  • model::String: The name of the model or process that generated or is associated with the object. Defaults to empty.

  • parent_id::Symbol: An identifier for the job or process that the object is associated with. Higher-level tracing ID.

  • thread_id::Symbol: An identifier for the thread or execution context (sub-task, sub-process) within the job where the object originated. It should be the same for objects in the same thread.

  • run_id::Union{Nothing, Int}: A unique identifier for the run or instance of the process (ie, a single call to the LLM) that generated the object. Defaults to a random integer.

  • meta::Union{Nothing, Dict{Symbol, Any}}: A dictionary for additional metadata that is not part of the object itself. Try to limit to a small number of items and singletons to be serializable.

  • _type::Symbol: A fixed symbol identifying the type of the tracer as :tracermessage, used for type discrimination.

This structure is particularly useful for systems that involve complex interactions or asynchronous processing, where tracking the flow and transformation of objects is crucial.

All fields are optional besides the object.

source


# PromptingTools.TracerSchemaType.
julia
TracerSchema <: AbstractTracerSchema

A schema designed to wrap another schema, enabling pre- and post-execution callbacks for tracing and additional functionalities. This type is specifically utilized within the TracerMessage type to trace the execution flow, facilitating observability and debugging in complex conversational AI systems.

The TracerSchema acts as a middleware, allowing developers to insert custom logic before and after the execution of the primary schema's functionality. This can include logging, performance measurement, or any other form of tracing required to understand or improve the execution flow.

TracerSchema automatically wraps messages in TracerMessage type, which has several important fields, eg,

  • object: the original message - unwrap with utility unwrap

  • meta: a dictionary with metadata about the tracing process (eg, prompt templates, LLM API kwargs) - extract with utility meta

  • parent_id: an identifier for the overall job / high-level conversation with the user where the current conversation thread originated. It should be the same for objects in the same thread.

  • thread_id: an identifier for the current thread or execution context (sub-task, sub-process, CURRENT CONVERSATION or vector of messages) within the broader parent task. It should be the same for objects in the same thread.

See also: meta, unwrap, SaverSchema, initialize_tracer, finalize_tracer

Example

julia
wrap_schema = TracerSchema(OpenAISchema())
+msg.content # access content as if it were the message

source


# PromptingTools.TracerMessageLikeType.
julia
TracerMessageLike{T <: Any} <: AbstractTracer

A mutable structure designed for general-purpose tracing within the system, capable of handling any type of object that is part of the AI Conversation. It provides a flexible way to track and annotate objects as they move through different parts of the system, facilitating debugging, monitoring, and auditing.

Fields

  • object::T: The original object being traced.

  • from::Union{Nothing, Symbol}: The identifier of the sender or origin of the object.

  • to::Union{Nothing, Symbol}: The identifier of the intended recipient or destination of the object.

  • viewers::Vector{Symbol}: A list of identifiers for entities that have access to view the object, in addition to the sender and recipient.

  • time_received::DateTime: The timestamp when the object was received by the tracing system.

  • time_sent::Union{Nothing, DateTime}: The timestamp when the object was originally sent, if available.

  • model::String: The name of the model or process that generated or is associated with the object. Defaults to empty.

  • parent_id::Symbol: An identifier for the job or process that the object is associated with. Higher-level tracing ID.

  • thread_id::Symbol: An identifier for the thread or execution context (sub-task, sub-process) within the job where the object originated. It should be the same for objects in the same thread.

  • run_id::Union{Nothing, Int}: A unique identifier for the run or instance of the process (ie, a single call to the LLM) that generated the object. Defaults to a random integer.

  • meta::Union{Nothing, Dict{Symbol, Any}}: A dictionary for additional metadata that is not part of the object itself. Try to limit to a small number of items and singletons to be serializable.

  • _type::Symbol: A fixed symbol identifying the type of the tracer as :tracermessage, used for type discrimination.

This structure is particularly useful for systems that involve complex interactions or asynchronous processing, where tracking the flow and transformation of objects is crucial.

All fields are optional besides the object.

source


# PromptingTools.TracerSchemaType.
julia
TracerSchema <: AbstractTracerSchema

A schema designed to wrap another schema, enabling pre- and post-execution callbacks for tracing and additional functionalities. This type is specifically utilized within the TracerMessage type to trace the execution flow, facilitating observability and debugging in complex conversational AI systems.

The TracerSchema acts as a middleware, allowing developers to insert custom logic before and after the execution of the primary schema's functionality. This can include logging, performance measurement, or any other form of tracing required to understand or improve the execution flow.

TracerSchema automatically wraps messages in TracerMessage type, which has several important fields, eg,

  • object: the original message - unwrap with utility unwrap

  • meta: a dictionary with metadata about the tracing process (eg, prompt templates, LLM API kwargs) - extract with utility meta

  • parent_id: an identifier for the overall job / high-level conversation with the user where the current conversation thread originated. It should be the same for objects in the same thread.

  • thread_id: an identifier for the current thread or execution context (sub-task, sub-process, CURRENT CONVERSATION or vector of messages) within the broader parent task. It should be the same for objects in the same thread.

See also: meta, unwrap, SaverSchema, initialize_tracer, finalize_tracer

Example

julia
wrap_schema = TracerSchema(OpenAISchema())
 msg = aigenerate(wrap_schema, "Say hi!"; model="gpt-4")
 # output type should be TracerMessage
-msg isa TracerMessage

You can define your own tracer schema and the corresponding methods: initialize_tracer, finalize_tracer. See src/llm_tracer.jl

source


# PromptingTools.UserMessageType.
julia
UserMessage

A message type for user-generated text-based responses. Consumed by ai* functions to generate responses.

Fields

  • content::T: The content of the message.

  • variables::Vector{Symbol}: The variables in the message.

  • name::Union{Nothing, String}: The name of the role in the conversation.

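A minimal sketch of how placeholders in the content are picked up into variables (the {{country}} placeholder is illustrative):

julia
using PromptingTools

msg = UserMessage("What is the capital of {{country}}?")
msg.variables  # [:country] -- handlebar placeholders detected in the content
# The placeholder is filled in at render time, eg, aigenerate([msg]; country = "France")
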
source


# PromptingTools.UserMessageWithImagesType.
julia
UserMessageWithImages

A message type for user-generated text-based responses with images. Consumed by ai* functions to generate responses.

Fields

  • content::T: The content of the message.

  • image_url::Vector{String}: The URLs of the images.

  • variables::Vector{Symbol}: The variables in the message.

  • name::Union{Nothing, String}: The name of the role in the conversation.

source


# PromptingTools.UserMessageWithImagesMethod.

Construct UserMessageWithImages with 1 or more images. Images can be either URLs or local paths.

source


# PromptingTools.X123Type.

With docstring

source


# OpenAI.create_chatMethod.
julia
OpenAI.create_chat(schema::CustomOpenAISchema,
+msg isa TracerMessage

You can define your own tracer schema and the corresponding methods: initialize_tracer, finalize_tracer. See src/llm_tracer.jl

source


# PromptingTools.UserMessageType.
julia
UserMessage

A message type for user-generated text-based responses. Consumed by ai* functions to generate responses.

Fields

  • content::T: The content of the message.

  • variables::Vector{Symbol}: The variables in the message.

  • name::Union{Nothing, String}: The name of the role in the conversation.

source


# PromptingTools.UserMessageWithImagesType.
julia
UserMessageWithImages

A message type for user-generated text-based responses with images. Consumed by ai* functions to generate responses.

Fields

  • content::T: The content of the message.

  • image_url::Vector{String}: The URLs of the images.

  • variables::Vector{Symbol}: The variables in the message.

  • name::Union{Nothing, String}: The name of the role in the conversation.

source


# PromptingTools.UserMessageWithImagesMethod.

Construct UserMessageWithImages with 1 or more images. Images can be either URLs or local paths.

source


# PromptingTools.X123Type.

With docstring

source


# OpenAI.create_chatMethod.
julia
OpenAI.create_chat(schema::CustomOpenAISchema,
     api_key::AbstractString,
     model::AbstractString,
     conversation;
     http_kwargs::NamedTuple = NamedTuple(),
     streamcallback::Any = nothing,
     url::String = "http://localhost:8080",
-    kwargs...)

Dispatch to the OpenAI.create_chat function, for any OpenAI-compatible API.

It expects url keyword argument. Provide it to the aigenerate function via api_kwargs=(; url="my-url")

It will forward your query to the "chat/completions" endpoint of the base URL that you provided (=url).

source


# OpenAI.create_chatMethod.
julia
OpenAI.create_chat(schema::LocalServerOpenAISchema,
+    kwargs...)

Dispatch to the OpenAI.create_chat function, for any OpenAI-compatible API.

It expects url keyword argument. Provide it to the aigenerate function via api_kwargs=(; url="my-url")

It will forward your query to the "chat/completions" endpoint of the base URL that you provided (=url).

source


# OpenAI.create_chatMethod.
julia
OpenAI.create_chat(schema::LocalServerOpenAISchema,
     api_key::AbstractString,
     model::AbstractString,
     conversation;
     url::String = "http://localhost:8080",
-    kwargs...)

Dispatch to the OpenAI.create_chat function, but with the LocalServer API parameters, ie, it defaults to the url specified by the LOCAL_SERVER preference. See ?PREFERENCES

source


# OpenAI.create_chatMethod.
julia
OpenAI.create_chat(schema::MistralOpenAISchema,

api_key::AbstractString, model::AbstractString, conversation; url::String="https://api.mistral.ai/v1", kwargs...)

Dispatch to the OpenAI.create_chat function, but with the MistralAI API parameters.

It tries to access the MISTRALAI_API_KEY ENV variable, but you can also provide it via the api_key keyword argument.

source


# PromptingTools.aiclassifyMethod.
julia
aiclassify(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;
-    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiclassify call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

  • calls initialize_tracer

  • calls aiclassify (with the tracer_schema.schema)

  • calls finalize_tracer

source


# PromptingTools.aiclassifyMethod.
julia
aiclassify(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;
+    kwargs...)

Dispatch to the OpenAI.create_chat function, but with the LocalServer API parameters, ie, it defaults to the url specified by the LOCAL_SERVER preference. See ?PREFERENCES

source


# OpenAI.create_chatMethod.
julia
OpenAI.create_chat(schema::MistralOpenAISchema,

api_key::AbstractString, model::AbstractString, conversation; url::String="https://api.mistral.ai/v1", kwargs...)

Dispatch to the OpenAI.create_chat function, but with the MistralAI API parameters.

It tries to access the MISTRALAI_API_KEY ENV variable, but you can also provide it via the api_key keyword argument.

source


# PromptingTools.aiclassifyMethod.
julia
aiclassify(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;
+    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiclassify call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

  • calls initialize_tracer

  • calls aiclassify (with the tracer_schema.schema)

  • calls finalize_tracer

source


# PromptingTools.aiclassifyMethod.
julia
aiclassify(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;
     choices::AbstractVector{T} = ["true", "false", "unknown"],
     model::AbstractString = MODEL_CHAT,
     api_kwargs::NamedTuple = NamedTuple(),
@@ -162,9 +162,9 @@
 # "A"

You can still use a simple true/false classification:

julia
aiclassify("Is two plus two four?") # true
 aiclassify("Is two plus three a vegetable on Mars?") # false

aiclassify returns only true/false/unknown. It's easy to get the proper Bool output type out with tryparse, eg,

julia
tryparse(Bool, aiclassify("Is two plus two four?")) isa Bool # true

Output of type Nothing marks that the model couldn't classify the statement as true/false.

Ideally, we would like to re-use some helpful system prompt to get more accurate responses. For this reason we have templates, eg, :JudgeIsItTrue. By specifying the template, we can provide our statement as the expected variable (it in this case). See that the model now correctly classifies the statement as "unknown".

julia
aiclassify(:JudgeIsItTrue; it = "Is two plus three a vegetable on Mars?") # unknown

For better results, use higher quality models like gpt4, eg,

julia
aiclassify(:JudgeIsItTrue;
     it = "If I had two apples and I got three more, I have five apples now.",
-    model = "gpt4") # true

source


# PromptingTools.aiembedFunction.
julia
aiembed(tracer_schema::AbstractTracerSchema,
+    model = "gpt4") # true

source


# PromptingTools.aiembedFunction.
julia
aiembed(tracer_schema::AbstractTracerSchema,
     doc_or_docs::Union{AbstractString, AbstractVector{<:AbstractString}}, postprocess::Function = identity;
-    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiembed call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

  • calls initialize_tracer

  • calls aiembed (with the tracer_schema.schema)

  • calls finalize_tracer

source


# PromptingTools.aiembedMethod.
julia
aiembed(prompt_schema::AbstractOllamaManagedSchema,
+    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiembed call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

  • calls initialize_tracer

  • calls aiembed (with the tracer_schema.schema)

  • calls finalize_tracer

source


# PromptingTools.aiembedMethod.
julia
aiembed(prompt_schema::AbstractOllamaManagedSchema,
         doc_or_docs::Union{AbstractString, AbstractVector{<:AbstractString}},
         postprocess::F = identity;
         verbose::Bool = true,
@@ -193,7 +193,7 @@
 schema = PT.OllamaManagedSchema()
 
 msg = aiembed(schema, "Hello World", copy; model="openhermes2.5-mistral")
-msg.content # 4096-element Vector{Float64}

source


# PromptingTools.aiembedMethod.
julia
aiembed(prompt_schema::AbstractOpenAISchema,
+msg.content # 4096-element Vector{Float64}

source


# PromptingTools.aiembedMethod.
julia
aiembed(prompt_schema::AbstractOpenAISchema,
         doc_or_docs::Union{AbstractString, AbstractVector{<:AbstractString}},
         postprocess::F = identity;
         verbose::Bool = true,
@@ -209,7 +209,7 @@
 msg = aiembed(["embed me", "and me too"], LinearAlgebra.normalize)
 
 # calculate cosine distance between the two normalized embeddings as a simple dot product
-msg.content' * msg.content[:, 1] # [1.0, 0.787]

source


# PromptingTools.aiextractMethod.
julia
aiextract(prompt_schema::AbstractAnthropicSchema, prompt::ALLOWED_PROMPT_TYPE;
+msg.content' * msg.content[:, 1] # [1.0, 0.787]

source


# PromptingTools.aiextractMethod.
julia
aiextract(prompt_schema::AbstractAnthropicSchema, prompt::ALLOWED_PROMPT_TYPE;
     return_type::Union{Type, AbstractTool, Vector},
     verbose::Bool = true,
     api_key::String = ANTHROPIC_API_KEY,
@@ -269,7 +269,7 @@
     :condition => String,
     :condition__description => "Current weather condition (e.g., sunny, rainy, cloudy)"
 ]
-msg = aiextract("The weather in New York is sunny and 72.5 degrees Fahrenheit."; return_type = fields_with_descriptions, model="claudeh")

source


# PromptingTools.aiextractMethod.
julia
aiextract(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;
+msg = aiextract("The weather in New York is sunny and 72.5 degrees Fahrenheit."; return_type = fields_with_descriptions, model="claudeh")

source


# PromptingTools.aiextractMethod.
julia
aiextract(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;
     return_type::Union{Type, AbstractTool, Vector},
     verbose::Bool = true,
     api_key::String = OPENAI_API_KEY,
@@ -336,8 +336,8 @@
 # PromptingTools.DataMessage(NamedTuple)
 
 msg.content
-# (location = "New York", temperature = 72.5, condition = "sunny")

It works equally well for structs provided as return types:

julia
msg = aiextract("James is 30, weighs 80kg. He's 180cm tall."; return_type=MyMeasurement, json_mode=true)

source


# PromptingTools.aiextractMethod.
julia
aiextract(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;
-    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiextract call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

  • calls initialize_tracer

  • calls aiextract (with the tracer_schema.schema)

  • calls finalize_tracer

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractAnthropicSchema, prompt::ALLOWED_PROMPT_TYPE; verbose::Bool = true,
     api_key::String = ANTHROPIC_API_KEY, model::String = MODEL_CHAT,
     return_all::Bool = false, dry_run::Bool = false,
     conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],
@@ -374,7 +374,7 @@
 
 # Get verbose output with details of each chunk
 streamcallback = PT.StreamCallback(; verbose=true, throw_on_error=true)
-msg = aigenerate("Count from 1 to 10."; streamcallback, model="claudeh")

Note: Streaming support is only for Anthropic models and it doesn't yet support tool calling and a few other features (logprobs, refusals, etc.)

You can also provide a prefill for the AI response to steer the response in a certain direction (eg, formatting, style):

julia
msg = aigenerate("Sum up 1 to 100."; aiprefill = "I'd be happy to answer in one number without any additional text. The answer is:", model="claudeh")

Note: It MUST NOT end with a trailing space. You'll get an API error if you do.

source
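
A minimal streaming sketch (assuming the default StreamCallback constructor streams the text to stdout, and that the "claudeh" model alias is configured as above; the prompt is arbitrary):

julia
streamcallback = PT.StreamCallback()   # defaults stream chunks to stdout as they arrive
msg = aigenerate("Write a haiku about Julia."; streamcallback, model = "claudeh")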


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractGoogleSchema, prompt::ALLOWED_PROMPT_TYPE;
     verbose::Bool = true,
     api_key::String = GOOGLE_API_KEY,
     model::String = "gemini-pro", return_all::Bool = false, dry_run::Bool = false,
@@ -394,7 +394,7 @@
     PT.SystemMessage("You're master Yoda from Star Wars trying to help the user become a Yedi."),
     PT.UserMessage("I have feelings for my iPhone. What should I do?")]
 msg=aigenerate(conversation; model="gemini")
-# AIMessage("Young Padawan, you have stumbled into a dangerous path.... <continues>")

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractOllamaManagedSchema, prompt::ALLOWED_PROMPT_TYPE; verbose::Bool = true,
+# AIMessage("Young Padawan, you have stumbled into a dangerous path.... <continues>")

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractOllamaManagedSchema, prompt::ALLOWED_PROMPT_TYPE; verbose::Bool = true,
     api_key::String = "", model::String = MODEL_CHAT,
     return_all::Bool = false, dry_run::Bool = false,
     conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],
@@ -419,7 +419,7 @@
 
 msg = aigenerate(schema, conversation; model="openhermes2.5-mistral")
 # [ Info: Tokens: 111 in 2.1 seconds
-# AIMessage("Strong the attachment is, it leads to suffering it may. Focus on the force within you must, ...<continues>")

Note: Managed Ollama currently supports at most 1 User Message and 1 System Message given the API limitations. If you want more, you need to use the ChatMLSchema.

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractOllamaManagedSchema, prompt::ALLOWED_PROMPT_TYPE; verbose::Bool = true,
+# AIMessage("Strong the attachment is, it leads to suffering it may. Focus on the force within you must, ...<continues>")

Note: Managed Ollama currently supports at most 1 User Message and 1 System Message given the API limitations. If you want more, you need to use the ChatMLSchema.

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractOllamaManagedSchema, prompt::ALLOWED_PROMPT_TYPE; verbose::Bool = true,
     api_key::String = "", model::String = MODEL_CHAT,
     return_all::Bool = false, dry_run::Bool = false,
     conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],
@@ -444,7 +444,7 @@
 
 msg = aigenerate(schema, conversation; model="openhermes2.5-mistral")
 # [ Info: Tokens: 111 in 2.1 seconds
-# AIMessage("Strong the attachment is, it leads to suffering it may. Focus on the force within you must, ...<continues>")

Note: Managed Ollama currently supports at most 1 User Message and 1 System Message given the API limitations. If you want more, you need to use the ChatMLSchema.

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;
+# AIMessage("Strong the attachment is, it leads to suffering it may. Focus on the force within you must, ...<continues>")

Note: Managed Ollama currently supports at most 1 User Message and 1 System Message given the API limitations. If you want more, you need to use the ChatMLSchema.

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;
     verbose::Bool = true,
     api_key::String = OPENAI_API_KEY,
     model::String = MODEL_CHAT, return_all::Bool = false, dry_run::Bool = false,
@@ -477,14 +477,14 @@
 
 # Get verbose output with details of each chunk
 streamcallback = PT.StreamCallback(; verbose=true, throw_on_error=true)
-msg = aigenerate("Count from 1 to 10."; streamcallback)

Learn more in ?StreamCallback. Note: Streaming support is only for OpenAI models and it doesn't yet support tool calling and a few other features (logprobs, refusals, etc.)

source


# PromptingTools.aigenerateMethod.
julia
aigenerate(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;
     tracer_kwargs = NamedTuple(), model = "", return_all::Bool = false, kwargs...)

Wraps the normal aigenerate call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

  • calls initialize_tracer

  • calls aigenerate (with the tracer_schema.schema)

  • calls finalize_tracer

Example

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())
 msg = aigenerate(wrap_schema, "Say hi!"; model = "gpt4t")
 msg isa TracerMessage # true
 msg.content # access content like if it was the message
 PT.pprint(msg) # pretty-print the message

It works on a vector of messages and converts only the non-tracer ones, eg,

julia
wrap_schema = PT.TracerSchema(PT.OpenAISchema())
 conv = aigenerate(wrap_schema, "Say hi!"; model = "gpt4t", return_all = true)
-all(PT.istracermessage, conv) #true

source


# PromptingTools.aiimageMethod.
julia
aiimage(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;
     image_size::AbstractString = "1024x1024",
     image_quality::AbstractString = "standard",
     image_n::Integer = 1,
@@ -512,8 +512,8 @@
 
 # Then you need to use Base64 package to decode it and save it to a file:
 using Base64
-write("cat_on_car_hd.png", base64decode(msg.content[:b64_json]));

source
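
A hedged end-to-end sketch (the prompt and file name are made up; the response_format flag is an assumption for getting base64 output back, adjust to your account's defaults):

julia
using Base64
msg = aiimage("A white cat on a car bonnet"; image_quality = "hd",
    api_kwargs = (; response_format = "b64_json"))   # assumption: request base64 payload
write("cat_on_car_hd.png", base64decode(msg.content[:b64_json]))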


# PromptingTools.aiimageMethod.
julia
aiimage(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;
-    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiimage call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

  • calls initialize_tracer

  • calls aiimage (with the tracer_schema.schema)

  • calls finalize_tracer

source


# PromptingTools.aiscanMethod.
julia
aiscan([prompt_schema::AbstractOllamaSchema,] prompt::ALLOWED_PROMPT_TYPE; 
+write("cat_on_car_hd.png", base64decode(msg.content[:b64_json]));

source


# PromptingTools.aiimageMethod.
julia
aiimage(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;
+    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiimage call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

  • calls initialize_tracer

  • calls aiimage (with the tracer_schema.schema)

  • calls finalize_tracer

source


# PromptingTools.aiscanMethod.
julia
aiscan([prompt_schema::AbstractOllamaSchema,] prompt::ALLOWED_PROMPT_TYPE; 
 image_url::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,
 image_path::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,
 attach_to_latest::Bool = true,
@@ -540,7 +540,7 @@
 # You can add syntax highlighting of the outputs via Markdown
 using Markdown
 msg.content |> Markdown.parse

Local models cannot handle image URLs directly (image_url), so you need to download the image first and provide it as image_path:

julia
using Downloads
-image_path = Downloads.download(image_url)

Notice that we set max_tokens = 2500. If your outputs seem truncated, it might be because the default maximum tokens on the server is set too low!

source
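
Putting the pieces above together (a sketch; the URL is a placeholder and the "bakllava" model alias and max_tokens placement are assumptions based on typical local vision setups):

julia
using Downloads
image_url = "https://example.com/some_image.png"   # placeholder URL
image_path = Downloads.download(image_url)
msg = aiscan("Describe the image"; image_path, model = "bakllava",
    api_kwargs = (; max_tokens = 2500))             # bump the server-side token limit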


# PromptingTools.aiscanMethod.
julia
aiscan([prompt_schema::AbstractOpenAISchema,] prompt::ALLOWED_PROMPT_TYPE; 
 image_url::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,
 image_path::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing,
 image_detail::AbstractString = "auto",
@@ -566,8 +566,8 @@
 
 # You can add syntax highlighting of the outputs via Markdown
 using Markdown
-msg.content |> Markdown.parse

Notice that we enforce max_tokens = 2500. That's because OpenAI seems to default to ~300 tokens, which provides incomplete outputs. Hence, we set this value to 2500 as a default. If you still get truncated outputs, increase this value.

source


# PromptingTools.aiscanMethod.
julia
aiscan(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;
-    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aiscan call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

  • calls initialize_tracer

  • calls aiscan (with the tracer_schema.schema)

  • calls finalize_tracer

source


# PromptingTools.aitemplatesFunction.
julia
aitemplates

Easily find the most suitable templates for your use case.

You can search by:

  • query::Symbol which looks only for partial matches in the template name

  • query::AbstractString which looks for partial matches in the template name or description

  • query::Regex which looks for matches in the template name, description or any of the message previews

Keyword Arguments

  • limit::Int limits the number of returned templates (Defaults to 10)

Examples

Find available templates with aitemplates:

julia
tmps = aitemplates("JuliaExpertAsk")
 # Will surface one specific template
 # 1-element Vector{AITemplateMetadata}:
 # PromptingTools.AITemplateMetadata
@@ -582,7 +582,7 @@
 {{ask}}"
 #   source: String ""

The above gives you a good idea of what the template is about, what placeholders are available, and how much it would cost to use it (=wordcount).

Search for all Julia-related templates:

julia
tmps = aitemplates("Julia")
 # 2-element Vector{AITemplateMetadata}... -> more to come later!

If you are on VSCode, you can leverage nice tabular display with vscodedisplay:

julia
using DataFrames
-tmps = aitemplates("Julia") |> DataFrame |> vscodedisplay

I have my selected template, how do I use it? Just use the "name" in aigenerate or aiclassify like you see in the first example!

source
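
For instance (a small sketch using the JuliaExpertAsk template surfaced above; the question text is arbitrary):

julia
msg = aigenerate(:JuliaExpertAsk; ask = "How do I add a package to my environment?")
# or preview the rendered messages first without calling the API:
aigenerate(:JuliaExpertAsk; ask = "How do I add a package?", dry_run = true, return_all = true)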


# PromptingTools.aitemplatesMethod.

Find the top-limit templates whose name or description fields partially match the query_key::String in TEMPLATE_METADATA.

source


# PromptingTools.aitemplatesMethod.

Find the top-limit templates where provided query_key::Regex matches either of name, description or previews or User or System messages in TEMPLATE_METADATA.

source


# PromptingTools.aitemplatesMethod.

Find the top-limit templates whose name::Symbol exactly matches the query_name::Symbol in TEMPLATE_METADATA.

source


# PromptingTools.aitoolsMethod.
julia
aitools(prompt_schema::AbstractAnthropicSchema, prompt::ALLOWED_PROMPT_TYPE;
     kwargs...)
     tools::Union{Type, Function, Method, AbstractTool, Vector} = Tool[],
     verbose::Bool = true,
@@ -627,7 +627,7 @@
 # AIToolRequest("-"; Tool Requests: 1)
 # ToolMessage("The weather in Tokyo on 2023-05-03 is 70 degrees.")
 # UserMessage("And in New York?")
-# AIToolRequest("-"; Tool Requests: 1)

source


# PromptingTools.aitoolsMethod.
julia
aitools(prompt_schema::AbstractOpenAISchema, prompt::ALLOWED_PROMPT_TYPE;
     tools::Union{Type, Function, Method, AbstractTool, Vector} = Tool[],
     verbose::Bool = true,
     api_key::String = OPENAI_API_KEY,
@@ -682,8 +682,8 @@
 # AIToolRequest("-"; Tool Requests: 1)
 # ToolMessage("The weather in Tokyo on 2023-05-03 is 70 degrees.")
 # UserMessage("And in New York?")
-# AIToolRequest("-"; Tool Requests: 1)

source
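
A compact sketch of how the transcript above could have been produced (the tool function below is a stand-in; real logic would call a weather service):

julia
"Get the weather for a location on a given date."
function get_weather(location::String, date::String)
    return "The weather in $location on $date is 70 degrees."
end

msg = aitools("What's the weather in Tokyo on May 3rd, 2023?"; tools = [get_weather])
# msg is expected to be an AIToolRequest carrying the requested tool call(s)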


# PromptingTools.aitoolsMethod.
julia
aitools(tracer_schema::AbstractTracerSchema, prompt::ALLOWED_PROMPT_TYPE;
-    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Wraps the normal aitools call in a tracing/callback system. Use tracer_kwargs to provide any information necessary to the tracer/callback system only (eg, parent_id, thread_id, run_id).

Logic:

  • calls initialize_tracer

  • calls aitools (with the tracer_schema.schema)

  • calls finalize_tracer

source


# PromptingTools.align_tracer!Method.

Aligns multiple tracers in the vector to have the same Parent and Thread IDs as the first item.

source


# PromptingTools.align_tracer!Method.

Aligns the tracer message, updating the parent_id, thread_id. Often used to align multiple tracers in the vector to have the same IDs.

source


# PromptingTools.anthropic_apiFunction.
julia
anthropic_api(
     prompt_schema::AbstractAnthropicSchema,
     messages::Vector{<:AbstractDict{String, <:Any}} = Vector{Dict{String, Any}}();
     api_key::AbstractString = ANTHROPIC_API_KEY,
@@ -694,16 +694,16 @@
     stream::Bool = false,
     url::String = "https://api.anthropic.com/v1",
     cache::Union{Nothing, Symbol} = nothing,
-    kwargs...)

Simple wrapper for a call to Anthropic API.

Keyword Arguments

  • prompt_schema: Defines which prompt template should be applied.

  • messages: a vector of AbstractMessage to send to the model

  • system: An optional string representing the system message for the AI conversation. If not provided, a default message will be used.

  • endpoint: The API endpoint to call, only "messages" are currently supported. Defaults to "messages".

  • model: A string representing the model to use for generating the response. Can be an alias corresponding to a model ID defined in MODEL_ALIASES.

  • max_tokens: The maximum number of tokens to generate. Defaults to 2048.

  • http_kwargs::NamedTuple: Additional keyword arguments for the HTTP request. Defaults to empty NamedTuple.

  • stream: A boolean indicating whether to stream the response. Defaults to false.

  • url: The URL of the Anthropic API. Defaults to "https://api.anthropic.com/v1".

  • cache: A symbol representing the caching strategy to be used. Currently only nothing (no caching), :system, :tools, :last and :all are supported.

  • kwargs: Prompt variables to be used to fill the prompt/template

source


# PromptingTools.anthropic_extra_headersMethod.
julia
anthropic_extra_headers

Adds API version and beta headers to the request.

Kwargs / Beta headers

  • has_tools: Enables tools in the conversation.

  • has_cache: Enables prompt caching.

  • has_long_output: Enables long outputs (up to 8K tokens) with Anthropic's Sonnet 3.5.

source


# PromptingTools.auth_headerMethod.
julia
auth_header(api_key::Union{Nothing, AbstractString};
     bearer::Bool = true,
     x_api_key::Bool = false,
     extra_headers::AbstractVector = Vector{
         Pair{String, String},
     }[],
-    kwargs...)

Creates the authentication headers for any API request. Assumes that the communication is done in JSON format.

Arguments

  • api_key::Union{Nothing, AbstractString}: The API key to be used for authentication. If Nothing, no authentication is used.

  • bearer::Bool: Provide the API key in the Authorization: Bearer ABC format. Defaults to true.

  • x_api_key::Bool: Provide the API key in the x-api-key: ABC header format. Defaults to false.

source


# PromptingTools.build_response_bodyMethod.
julia
build_response_body(
-    flavor::AnthropicStream, cb::AbstractStreamCallback; verbose::Bool = false, kwargs...)

Build the response body from the chunks to mimic receiving a standard response from the API.

Note: Limited functionality for now. Does NOT support tool use. Use standard responses for these.

source


# PromptingTools.build_response_bodyMethod.
julia
build_response_body(flavor::OpenAIStream, cb::AbstractStreamCallback; verbose::Bool = false, kwargs...)

Build the response body from the chunks to mimic receiving a standard response from the API.

Note: Limited functionality for now. Does NOT support tool use, refusals, logprobs. Use standard responses for these.

source


# PromptingTools.build_template_metadataFunction.
julia
build_template_metadata(
     template::AbstractVector{<:AbstractMessage}, template_name::Symbol,
-    metadata_msgs::AbstractVector{<:MetadataMessage} = MetadataMessage[]; max_length::Int = 100)

Builds AITemplateMetadata for a given template based on the messages in template and other information.

AITemplateMetadata is a helper struct for easy searching and reviewing of templates via aitemplates().

Note: Assumes that there is only ever one UserMessage and SystemMessage (concatenates them together)

source


# PromptingTools.call_costMethod.
julia
call_cost(prompt_tokens::Int, completion_tokens::Int, model::String;
     cost_of_token_prompt::Number = get(MODEL_REGISTRY,
         model,
         (; cost_of_token_prompt = 0.0)).cost_of_token_prompt,
@@ -725,8 +725,8 @@
 
 # Using custom token costs
 cost2 = call_cost(10, 20, "model3"; cost_of_token_prompt = 0.08, cost_of_token_generation = 0.12)
-# cost2 = 10 * 0.08 + 20 * 0.12 = 3.2

source


# PromptingTools.call_cost_alternativeMethod.

call_cost_alternative()

Alternative cost calculation. Used to calculate cost of image generation with DALL-E 3 and similar.

source


# PromptingTools.callbackMethod.
julia
callback(cb::AbstractStreamCallback, chunk::StreamChunk; kwargs...)

Process the chunk to be printed and print it. It's a wrapper for two operations:

  • extract the content from the chunk using extract_content

  • print the content to the output stream using print_content

source


# PromptingTools.configure_callback!Method.
julia
configure_callback!(cb::StreamCallback, schema::AbstractPromptSchema;
-    api_kwargs...)

Configures the callback cb for streaming with a given prompt schema.

If no cb.flavor is provided, adjusts the flavor and the provided api_kwargs as necessary. Eg, for most schemas, we add kwargs like stream = true to the api_kwargs.

If cb.flavor is provided, both callback and api_kwargs are left unchanged! You need to configure them yourself!

source


# PromptingTools.create_templateMethod.
julia
create_template(; user::AbstractString, system::AbstractString="Act as a helpful AI assistant.", 
     load_as::Union{Nothing, Symbol, AbstractString} = nothing)
 
 create_template(system::AbstractString, user::AbstractString, 
@@ -753,11 +753,11 @@
 tpl=PT.create_template("You must speak like a pirate", "Say hi to {{name}}"; load_as="GreatingPirate")
 
 # you can now use it like any other template
-aiextract(:GreatingPirate; name="Jack Sparrow")

source
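
Once loaded under a name, the template can be reused like any built-in one (a sketch; aigenerate is typically the more natural pairing for a chat-style template than the aiextract call above):

julia
tpl = PT.create_template("You must speak like a pirate", "Say hi to {{name}}";
    load_as = "GreatingPirate")
msg = aigenerate(:GreatingPirate; name = "Jack Sparrow")
# aitemplates("GreatingPirate") should now list the in-session template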


# PromptingTools.decode_choicesMethod.
julia
decode_choices(schema::OpenAISchema,
     choices::AbstractVector{<:AbstractString},
     msg::AIMessage; model::AbstractString,
     token_ids_map::Union{Nothing, Dict{<:AbstractString, <:Integer}} = nothing,
-    kwargs...)

Decodes the underlying AIMessage against the original choices to look up what the category name was.

If it fails, it will return msg.content == nothing

source


# PromptingTools.detect_base_main_overridesMethod.
julia
detect_base_main_overrides(code_block::AbstractString)

Detects if a given code block overrides any Base or Main methods.

Returns a tuple of a boolean and a vector of the overridden methods.

source


# PromptingTools.distance_longest_common_subsequenceMethod.
julia
distance_longest_common_subsequence(
     input1::AbstractString, input2::AbstractString)
 
 distance_longest_common_subsequence(
@@ -774,7 +774,7 @@
     """
 
 dist = distance_longest_common_subsequence(story, context)
-@info "The closest context to the query: "$(first(story,20))..." is: "$(context[argmin(dist)])" (distance: $(minimum(dist)))"

source


# PromptingTools.encode_choicesMethod.
julia
encode_choices(schema::OpenAISchema, choices::AbstractVector{<:AbstractString};
     model::AbstractString,
     token_ids_map::Union{Nothing, Dict{<:AbstractString, <:Integer}} = nothing,
     kwargs...)
@@ -794,52 +794,52 @@
 choices_prompt # Output: "1. "A" for any animal or creature
 2. "P" for any plant or tree
 3. "O" for everything else"
-logit_bias # Output: Dict(16 => 100, 17 => 100, 18 => 100)

source


# PromptingTools.eval!Method.
julia
eval!(cb::AbstractCodeBlock;
     safe_eval::Bool = true,
     capture_stdout::Bool = true,
     prefix::AbstractString = "",
-    suffix::AbstractString = "")

Evaluates a code block cb in-place. It runs automatically when AICode is instantiated with a String.

Check the outcome of evaluation with Base.isvalid(cb). If ==true, the provided code block has executed successfully.

Steps:

  • If cb::AICode has not been evaluated, cb.success = nothing. After the evaluation it will be either true or false depending on the outcome

  • Parse the text in cb.code

  • Evaluate the parsed expression

  • Capture outputs of the evaluated expression in cb.output

  • [OPTIONAL] Capture any stdout outputs (eg, test failures) in cb.stdout

  • If any error exception is raised, it is saved in cb.error

  • Finally, if all steps were successful, success is set to cb.success = true

Keyword Arguments

  • safe_eval::Bool: If true, we first check for any Pkg operations (eg, installing new packages) and missing imports, then the code will be evaluated inside a bespoke scratch module (not to change any user variables)

  • capture_stdout::Bool: If true, we capture any stdout outputs (eg, test failures) in cb.stdout

  • prefix::AbstractString: A string to be prepended to the code block before parsing and evaluation. Useful to add some additional code definition or necessary imports. Defaults to an empty string.

  • suffix::AbstractString: A string to be appended to the code block before parsing and evaluation. Useful to check that tests pass or that an example executes. Defaults to an empty string.

source
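
A small sketch of the typical entry point (AICode wraps eval! as described above; the code string is arbitrary):

julia
cb = PT.AICode("mysum(a, b) = a + b\nmysum(1, 2)"; safe_eval = true)
isvalid(cb)   # true if parsing and evaluation succeeded
cb.output     # captured output of the evaluated expression
cb.error      # nothing unless an exception was raised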


# PromptingTools.execute_toolMethod.
julia
execute_tool(f::Function, args::AbstractDict)

Executes a function with the provided arguments.

Dictionary is un-ordered, so we need to sort the arguments first and then pass them to the function.

source


# PromptingTools.extract_chunksMethod.
julia
extract_chunks(flavor::AbstractStreamFlavor, blob::AbstractString;
-    spillover::AbstractString = "", verbose::Bool = false, kwargs...)

Extract the chunks from the received SSE blob. Shared by all streaming flavors currently.

Returns a list of StreamChunk and the next spillover (if message was incomplete).

source


# PromptingTools.extract_code_blocksMethod.
julia
extract_code_blocks(markdown_content::String) -> Vector{String}

Extract Julia code blocks from a markdown string.

This function searches through the provided markdown content, identifies blocks of code specifically marked as Julia code (using the julia ... code fence patterns), and extracts the code within these blocks. The extracted code blocks are returned as a vector of strings, with each string representing one block of Julia code.

Note: Only the content within the code fences is extracted, and the code fences themselves are not included in the output.

See also: extract_code_blocks_fallback

Arguments

  • markdown_content::String: A string containing the markdown content from which Julia code blocks are to be extracted.

Returns

  • Vector{String}: A vector containing strings of extracted Julia code blocks. If no Julia code blocks are found, an empty vector is returned.

Examples

Example with a single Julia code block

julia
markdown_single = """

julia println("Hello, World!")

"""
 extract_code_blocks(markdown_single)
 # Output: ["Hello, World!"]
julia
# Example with multiple Julia code blocks
 markdown_multiple = """

julia x = 5

Some text in between

julia y = x + 2

"""
 extract_code_blocks(markdown_multiple)
-# Output: ["x = 5", "y = x + 2"]

source


# PromptingTools.extract_code_blocks_fallbackMethod.
julia
extract_code_blocks_fallback(markdown_content::String, delim::AbstractString="\n```\n")

Extract Julia code blocks from a markdown string using a fallback method (splitting by arbitrary delimiters). Much simpler than extract_code_blocks and does not support nested code blocks.

It is often used as a fallback for smaller LLMs that forget to code fence julia ....

Example

julia
code = """

println("hello")


 Some text

println("world")

"""
 
 # We extract text between triple backticks and check each blob if it looks like a valid Julia code
 code_parsed = extract_code_blocks_fallback(code) |> x -> filter(is_julia_code, x) |> x -> join(x, "
-")

source


# PromptingTools.extract_contentMethod.
julia
extract_content(flavor::AnthropicStream, chunk)

Extract the content from the chunk.

source


# PromptingTools.extract_contentMethod.
julia
extract_content(flavor::OpenAIStream, chunk::StreamChunk; kwargs...)

Extract the content from the chunk.

source


# PromptingTools.extract_docstringMethod.

Extract the docstring from a type or function.

source


# PromptingTools.extract_function_nameMethod.
julia
extract_function_name(code_block::String) -> Union{String, Nothing}

Extract the name of a function from a given Julia code block. The function searches for two patterns:

  • The explicit function declaration pattern: function name(...) ... end

  • The concise function declaration pattern: name(...) = ...

If a function name is found, it is returned as a string. If no function name is found, the function returns nothing.

To capture all function names in the block, use extract_function_names.

Arguments

  • code_block::String: A string containing Julia code.

Returns

  • Union{String, Nothing}: The extracted function name or nothing if no name is found.

Example

julia
code = """
+")

source


# PromptingTools.extract_contentMethod.
julia
extract_content(flavor::AnthropicStream, chunk)

Extract the content from the chunk.

source


# PromptingTools.extract_contentMethod.
julia
extract_content(flavor::OpenAIStream, chunk::StreamChunk; kwargs...)

Extract the content from the chunk.

source


# PromptingTools.extract_docstringMethod.

Extract the docstring from a type or function.

source


# PromptingTools.extract_function_nameMethod.
julia
extract_function_name(code_block::String) -> Union{String, Nothing}

Extract the name of a function from a given Julia code block. The function searches for two patterns:

  • The explicit function declaration pattern: function name(...) ... end

  • The concise function declaration pattern: name(...) = ...

If a function name is found, it is returned as a string. If no function name is found, the function returns nothing.

To capture all function names in the block, use extract_function_names.

Arguments

  • code_block::String: A string containing Julia code.

Returns

  • Union{String, Nothing}: The extracted function name or nothing if no name is found.

Example

julia
code = """
 function myFunction(arg1, arg2)
     # Function body
 end
 """
 extract_function_name(code)
-# Output: "myFunction"

source


# PromptingTools.extract_function_namesMethod.
julia
extract_function_names(code_block::AbstractString)

Extract one or more names of functions defined in a given Julia code block. The function searches for two patterns: - The explicit function declaration pattern: function name(...) ... end - The concise function declaration pattern: name(...) = ...

It always returns a vector of strings, even if only one function name is found; if no function names are found, the vector is empty.

For only one function name match, use extract_function_name.

source


# PromptingTools.extract_julia_importsMethod.
julia
extract_julia_imports(input::AbstractString; base_or_main::Bool = false)

Detects any using or import statements in a given string and returns the package names as a vector of symbols.

base_or_main is a boolean that determines whether to isolate only Base and Main OR whether to exclude them in the returned vector.

source
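
For example (a sketch; per the description above the package names come back as symbols, so the exact result shown is only an expectation):

julia
extract_julia_imports("using Foo, Bar\nimport Baz")
# expected to return the package names as symbols, e.g. [:Foo, :Bar, :Baz]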


# PromptingTools.finalize_outputsMethod.
julia
finalize_outputs(prompt::ALLOWED_PROMPT_TYPE, conv_rendered::Any,
+# Output: "myFunction"

source


# PromptingTools.extract_function_namesMethod.
julia
extract_function_names(code_block::AbstractString)

Extract one or more names of functions defined in a given Julia code block. The function searches for two patterns: - The explicit function declaration pattern: function name(...) ... end - The concise function declaration pattern: name(...) = ...

It always returns a vector of strings, even if only one function name is found (it will be empty).

For only one function name match, use extract_function_name.

source


# PromptingTools.extract_julia_importsMethod.
julia
extract_julia_imports(input::AbstractString; base_or_main::Bool = false)

Detects any using or import statements in a given string and returns the package names as a vector of symbols.

base_or_main is a boolean that determines whether to isolate only Base and Main OR whether to exclude them in the returned vector.

source


# PromptingTools.finalize_outputsMethod.
julia
finalize_outputs(prompt::ALLOWED_PROMPT_TYPE, conv_rendered::Any,
     msg::Union{Nothing, AbstractMessage, AbstractVector{<:AbstractMessage}};
     return_all::Bool = false,
     dry_run::Bool = false,
     conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],
     no_system_message::Bool = false,
-    kwargs...)

Finalizes the outputs of the ai* functions by either returning the conversation history or the last message.

Keyword arguments

  • return_all::Bool=false: If true, returns the entire conversation history, otherwise returns only the last message (the AIMessage).

  • dry_run::Bool=false: If true, does not send the messages to the model, but only renders the prompt with the given schema and replacement variables. Useful for debugging when you want to check the specific schema rendering.

  • conversation::AbstractVector{<:AbstractMessage}=[]: An optional vector of AbstractMessage objects representing the conversation history. If not provided, it is initialized as an empty vector.

  • kwargs...: Variables to replace in the prompt template.

  • no_system_message::Bool=false: If true, the default system message is not included in the conversation history. Any existing system message is converted to a UserMessage.

source


# PromptingTools.finalize_tracerMethod.
julia
finalize_tracer(
     tracer_schema::AbstractTracerSchema, tracer, msg_or_conv::Union{
         AbstractMessage, AbstractVector{<:AbstractMessage}};
-    tracer_kwargs = NamedTuple(), model = "", kwargs...)

Finalizes the call tracer with whatever is needed after the ai* calls. Use tracer_kwargs to provide any information necessary (eg, parent_id, thread_id, run_id).

In the default implementation, we convert all non-tracer messages into TracerMessage.

See also: meta, unwrap, SaverSchema, initialize_tracer

source


# PromptingTools.finalize_tracerMethod.
julia
finalize_tracer(
     tracer_schema::SaverSchema, tracer, msg_or_conv::Union{
         AbstractMessage, AbstractVector{<:AbstractMessage}};
     tracer_kwargs = NamedTuple(), model = "", kwargs...)

Finalizes the calltracer by saving the provided conversation msg_or_conv to the disk.

Default path is LOG_DIR/conversation__<first_msg_hash>__<time_received_str>.json, where LOG_DIR is set by user preferences or ENV variable (defaults to log/ in current working directory).

If you want to change the logging directory or the exact file name to log with, you can provide the following arguments to tracer_kwargs:

  • log_dir - used as the directory to save the log into when provided. Defaults to LOG_DIR if not provided.

  • log_file_path - used as the file name to save the log into when provided. This value overrules the log_dir and LOG_DIR if provided.

It can be composed with TracerSchema to also attach necessary metadata (see below).

Example

julia
wrap_schema = PT.SaverSchema(PT.TracerSchema(PT.OpenAISchema()))
 conv = aigenerate(wrap_schema,:BlankSystemUser; system="You're a French-speaking assistant!",
     user="Say hi!"; model="gpt-4", api_kwargs=(;temperature=0.1), return_all=true)
 
-# conv is a vector of messages that will be saved to a JSON together with metadata about the template and api_kwargs

See also: meta, unwrap, TracerSchema, initialize_tracer

source
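
For instance, to redirect the saved conversations to a custom folder (a sketch building on the example above; the folder name is arbitrary):

julia
wrap_schema = PT.SaverSchema(PT.TracerSchema(PT.OpenAISchema()))
conv = aigenerate(wrap_schema, "Say hi!"; model = "gpt-4",
    tracer_kwargs = (; log_dir = "my_logs"), return_all = true)
# the JSON log should land in my_logs/ instead of the default LOG_DIR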


# PromptingTools.find_subsequence_positionsMethod.
julia
find_subsequence_positions(subseq, seq) -> Vector{Int}

Find all positions of a subsequence subseq within a larger sequence seq. Used to lookup positions of code blocks in markdown.

This function scans the sequence seq and identifies all starting positions where the subsequence subseq is found. Both subseq and seq should be vectors of integers, typically obtained using codeunits on strings.

Arguments

  • subseq: A vector of integers representing the subsequence to search for.

  • seq: A vector of integers representing the larger sequence in which to search.

Returns

  • Vector{Int}: A vector of starting positions (1-based indices) where the subsequence is found in the sequence.

Examples

julia
find_subsequence_positions(codeunits("ab"), codeunits("cababcab")) # Returns [2, 4, 7]

source


# PromptingTools.generate_structMethod.
julia
generate_struct(fields::Vector)

Generate a struct with the given name and fields. Fields can be specified simply as symbols (with default type String) or pairs of symbol and type. Field descriptions can be provided by adding a pair with the field name suffixed with "__description" (eg, :myfield__description => "My field description").

Returns: A tuple of (struct type, descriptions)

Examples

julia
Weather, descriptions = generate_struct(
     [:location,
      :temperature=>Float64,
      :temperature__description=>"Temperature in degrees Fahrenheit",
      :condition=>String,
      :condition__description=>"Current weather condition (e.g., sunny, rainy, cloudy)"
-    ])

source


# PromptingTools.get_arg_namesMethod.

Get the argument names from a function, ignores keyword arguments!!

source


# PromptingTools.get_arg_namesMethod.

Get the argument names from a method, ignores keyword arguments!!

source


# PromptingTools.get_arg_typesMethod.

Get the argument types from a function, ignores keyword arguments!!

source


# PromptingTools.get_arg_typesMethod.

Get the argument types from a method, ignores keyword arguments!!

source


# PromptingTools.get_preferencesMethod.
julia
get_preferences(key::String)

Get preferences for PromptingTools. See ?PREFERENCES for more information.

See also: set_preferences!

Example

julia
PromptingTools.get_preferences("MODEL_CHAT")

source


# PromptingTools.ggi_generate_contentFunction.

Stub - to be extended in extension: GoogleGenAIPromptingToolsExt. ggi stands for GoogleGenAI

source


# PromptingTools.handle_error_messageMethod.
julia
handle_error_message(chunk::StreamChunk; throw_on_error::Bool = false, kwargs...)

Handles error messages from the streaming response.

source


# PromptingTools.has_julia_promptMethod.

Checks if a given string has a Julia prompt (julia>) at the beginning of a line.

source


# PromptingTools.initialize_tracerMethod.
julia
initialize_tracer(
     tracer_schema::AbstractTracerSchema; model = "", tracer_kwargs = NamedTuple(),
-    prompt::ALLOWED_PROMPT_TYPE = "", kwargs...)

Initializes tracer/callback (if necessary). Can provide any keyword arguments in tracer_kwargs (eg, parent_id, thread_id, run_id). Is executed prior to the ai* calls.

By default it captures:

  • time_sent: the time the request was sent

  • model: the model to use

  • meta: a dictionary of additional metadata that is not part of the tracer itself

    • template_name: the template to use if any

    • template_version: the template version to use if any

    • expanded api_kwargs, ie, the keyword arguments to pass to the API call

In the default implementation, we just collect the necessary data to build the tracer object in finalize_tracer.

See also: meta, unwrap, TracerSchema, SaverSchema, finalize_tracer

source


# PromptingTools.is_doneMethod.
julia
is_done(flavor, chunk)

Check if the streaming is done. Shared by all streaming flavors currently.

source


# PromptingTools.isextractedMethod.

Check if the object is an instance of AbstractExtractedData

source


# PromptingTools.last_messageMethod.

Helpful accessor for the last message in conversation. Returns the last message in the conversation.

source


# PromptingTools.last_outputMethod.

Helpful accessor for the last generated output (msg.content) in conversation. Returns the last output in the conversation (eg, the string/data in the last message).

source


# PromptingTools.length_longest_common_subsequenceMethod.
julia
length_longest_common_subsequence(itr1::AbstractString, itr2::AbstractString)

Compute the length of the longest common subsequence between two string sequences (ie, the higher the number, the better the match).

Source: https://cn.julialang.org/LeetCode.jl/dev/democards/problems/problems/1143.longest-common-subsequence/

Arguments

  • itr1: The first sequence, eg, a String.

  • itr2: The second sequence, eg, a String.

Returns

The length of the longest common subsequence.

Examples

julia
text1 = "abc-abc----"
 text2 = "___ab_c__abc"
 length_longest_common_subsequence(text1, text2)
 # Output: 6 (-> "abcabc")

It can be used to fuzzy-match strings and find the similarity between them (tip: normalize the match).

julia
commands = ["product recommendation", "emotions", "specific product advice", "checkout advice"]
@@ -850,7 +850,7 @@
     @info "The closest command to the query: "$(query)" is: "$(commands[pos])" (distance: $(dist), normalized: $(norm))"
 end

But it might be easier to use the convenience wrapper distance_longest_common_subsequence directly, as sketched below!
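A hedged sketch of that wrapper (the argument order and the normalized-distance output are assumptions based on the description above):

```julia
commands = ["product recommendation", "emotions", "specific product advice", "checkout advice"]
query = "Which product should I buy?"
dist = PromptingTools.distance_longest_common_subsequence(query, commands)
commands[argmin(dist)]   # the closest matching command (smallest distance)
```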


 
-[source](https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/utils.jl#L252-L288)
+[source](https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/utils.jl#L252-L288)
 
 </div>
 <br>
@@ -863,7 +863,7 @@
 Shows the Dictionary of model aliases in the registry. Add more with `MODEL_ALIASES[alias] = model_name`.
 
 
-[source](https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/user_preferences.jl#L1009)
+[source](https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/user_preferences.jl#L1009)
 
 </div>
 <br>
@@ -876,7 +876,7 @@
 Shows the list of models in the registry. Add more with `register_model!`.
 
 
-[source](https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/user_preferences.jl#L1007)
+[source](https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/user_preferences.jl#L1007)
 
 </div>
 <br>
@@ -889,7 +889,7 @@
 Loads API keys from environment variables and preferences
 
 
-[source](https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/user_preferences.jl#L170)
+[source](https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/user_preferences.jl#L170)
 
 </div>
 <br>
@@ -900,11 +900,11 @@
 
 
 ```julia
load_conversation(io_or_file::Union{IO, AbstractString})

Loads a conversation (messages) from io_or_file

source


# PromptingTools.load_templateMethod.
julia
load_template(io_or_file::Union{IO, AbstractString})

Loads messaging template from io_or_file and returns tuple of template messages and metadata.

source


# PromptingTools.load_templates!Function.
julia
load_templates!(dir_templates::Union{String, Nothing} = nothing;
     remember_path::Bool = true,
     remove_templates::Bool = isnothing(dir_templates),
     store::Dict{Symbol, <:Any} = TEMPLATE_STORE,
    metadata_store::Vector{<:AITemplateMetadata} = TEMPLATE_METADATA)

Loads templates from folder templates/ in the package root and stores them in TEMPLATE_STORE and TEMPLATE_METADATA.

Note: Automatically removes any existing templates and metadata from TEMPLATE_STORE and TEMPLATE_METADATA if remove_templates=true.

Arguments

  • dir_templates::Union{String, Nothing}: The directory path to load templates from. If nothing, uses the default list of paths. It is usually used only once "to register" a new template storage.

  • remember_path::Bool=true: If true, remembers the path for future refresh (in TEMPLATE_PATH).

  • remove_templates::Bool=isnothing(dir_templates): If true, removes any existing templates and metadata from store and metadata_store.

  • store::Dict{Symbol, <:Any}=TEMPLATE_STORE: The store to load the templates into.

  • metadata_store::Vector{<:AITemplateMetadata}=TEMPLATE_METADATA: The metadata store to load the metadata into.

Example

Load the default templates:

julia
PT.load_templates!() # no path needed

Load templates from a new custom path:

julia
PT.load_templates!("path/to/templates") # we will remember this path for future refresh

If you now want to refresh the default templates and the new path, just call load_templates!() without any arguments.

source


# PromptingTools.metaMethod.

Extracts the metadata dictionary from the tracer message or tracer-like object.

source


# PromptingTools.ollama_apiFunction.
julia
ollama_api(prompt_schema::Union{AbstractOllamaManagedSchema, AbstractOllamaSchema},
     prompt::Union{AbstractString, Nothing} = nothing;
     system::Union{Nothing, AbstractString} = nothing,
     messages::Vector{<:AbstractMessage} = AbstractMessage[],
@@ -912,59 +912,59 @@
     model::String = "llama2", http_kwargs::NamedTuple = NamedTuple(),
     stream::Bool = false,
     url::String = "localhost", port::Int = 11434,
    kwargs...)

Simple wrapper for a call to Ollama API.

Keyword Arguments

  • prompt_schema: Defines which prompt template should be applied.

  • prompt: Can be a string representing the prompt for the AI conversation, a UserMessage, or a vector of AbstractMessages.

  • system: An optional string representing the system message for the AI conversation. If not provided, a default message will be used.

  • endpoint: The API endpoint to call, only "generate" and "embeddings" are currently supported. Defaults to "generate".

  • model: A string representing the model to use for generating the response. Can be an alias corresponding to a model ID defined in MODEL_ALIASES.

  • http_kwargs::NamedTuple: Additional keyword arguments for the HTTP request. Defaults to empty NamedTuple.

  • stream: A boolean indicating whether to stream the response. Defaults to false.

  • url: The URL of the Ollama API. Defaults to "localhost".

  • port: The port of the Ollama API. Defaults to 11434.

  • kwargs: Prompt variables to be used to fill the prompt/template
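A minimal hedged sketch of a direct call (assumes a locally running Ollama server with the llama2 model pulled; the shape of the returned object is not asserted here):

```julia
schema = PromptingTools.OllamaManagedSchema()
# Low-level call; in practice you would usually go through aigenerate with an Ollama model alias.
resp = PromptingTools.ollama_api(schema, "Say hi in one sentence."; model = "llama2")
```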

source


# PromptingTools.parse_toolMethod.
julia
parse_tool(datatype::Type, blob::AbstractString)

Parse the JSON blob into the specified datatype in try-catch mode.

If parsing fails, it tries to return the untyped JSON blob in a dictionary.
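A hypothetical sketch (assuming parse_tool can instantiate a simple concrete struct from matching JSON, as in the tool-calling pipeline; per the note above, a plain dictionary is returned if typed parsing fails):

```julia
struct Weather
    location::String
    temperature::Float64
end

blob = """{"location": "Prague", "temperature": 21.5}"""
PromptingTools.parse_tool(Weather, blob)   # expected: Weather("Prague", 21.5), else a Dict fallback
```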

source


# PromptingTools.pprintFunction.

Utility for pretty printing PromptingTools types in REPL.

source


# PromptingTools.pprintMethod.
julia
pprint(io::IO, conversation::AbstractVector{<:AbstractMessage})

Pretty print a vector of AbstractMessage to the given IO stream.
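For example, using exactly this documented method (a sketch):

```julia
conv = [PromptingTools.SystemMessage("You are terse."),
    PromptingTools.UserMessage("Hi!"),
    PromptingTools.AIMessage("Hello!")]
PromptingTools.pprint(stdout, conv)   # pretty-prints each message to stdout
```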

source


# PromptingTools.pprintMethod.
julia
pprint(io::IO, msg::AbstractMessage; text_width::Int = displaysize(io)[2])

Pretty print a single AbstractMessage to the given IO stream.

text_width is the width of the text to be displayed. If not provided, it defaults to the width of the given IO stream and adds newline separators as needed.

source


# PromptingTools.previewFunction.

Utility for rendering the conversation (vector of messages) as markdown. REQUIRES the Markdown package to load the extension! See also pprint

source


# PromptingTools.print_contentMethod.
julia
print_content(out::Channel, text::AbstractString; kwargs...)

Print the content to the provided Channel out.

source


# PromptingTools.print_contentMethod.
julia
print_content(out::IO, text::AbstractString; kwargs...)

Print the content to the IO output stream out.

source


# PromptingTools.print_contentMethod.
julia
print_content(out::Nothing, text::Any)

Do nothing if the output stream is nothing.

source


# PromptingTools.push_conversation!Method.
julia
push_conversation!(conv_history, conversation::AbstractVector, max_history::Union{Int, Nothing})

Add a new conversation to the conversation history and resize the history if necessary.

This function appends a conversation to the conv_history, which is a vector of conversations. Each conversation is represented as a vector of AbstractMessage objects. After adding the new conversation, the history is resized according to the max_history parameter to ensure that the size of the history does not exceed the specified limit.

Arguments

  • conv_history: A vector that stores the history of conversations. Typically, this is PT.CONV_HISTORY.

  • conversation: The new conversation to be added. It should be a vector of AbstractMessage objects.

  • max_history: The maximum number of conversations to retain in the history. If Nothing, the history is not resized.

Returns

The updated conversation history.

Example

julia
new_conversation = aigenerate("Hello World"; return_all = true)
push_conversation!(PT.CONV_HISTORY, new_conversation, 10)

This is done automatically by the ai"" macros.

source


# PromptingTools.recursive_splitterMethod.
julia
recursive_splitter(text::AbstractString, separators::Vector{String}; max_length::Int=35000) -> Vector{String}

Split a given string text into chunks recursively using a series of separators, with each chunk having a maximum length of max_length (if it's achievable given the separators provided). This function is useful for splitting large documents or texts into smaller segments that are more manageable for processing, particularly for models or systems with limited context windows.

It was previously known as split_by_length.

This is similar to Langchain's RecursiveCharacterTextSplitter. To achieve the same behavior, use separators=["\n\n", "\n", " ", ""].

Arguments

  • text::AbstractString: The text to be split.

  • separators::Vector{String}: An ordered list of separators used to split the text. The function iteratively applies these separators to split the text. It is recommended to use ["\n\n", ". ", "\n", " "].

  • max_length::Int: The maximum length of each chunk. Defaults to 35,000 characters. This length is considered after each iteration of splitting, ensuring chunks fit within specified constraints.

Returns

Vector{String}: A vector of strings, where each string is a chunk of the original text that is smaller than or equal to max_length.

Usage Tips

  • I tend to prefer splitting on sentences (". ") before splitting on newline characters ("\n") to preserve the structure of the text.

  • What's the difference between separators=["\n"," ",""] and separators=["\n"," "]? The former will split down to character level (""), so it will always achieve max_length, but it will split words (bad for context!). I prefer to instead set a slightly smaller max_length and not split words.

How It Works

  • The function processes the text iteratively with each separator in the provided order. It then measures the length of each chunk and splits it further if it exceeds the max_length. If a chunk is "short enough", the subsequent separators are not applied to it.

  • Each chunk is as close to max_length as possible (unless we cannot split it any further, eg, if the splitters are "too big" / there are not enough of them)

  • If the text is empty, the function returns an empty array.

  • Separators are re-added to the text chunks after splitting, preserving the original structure of the text as closely as possible. Apply strip if you do not need them.

  • The function provides separators as the second argument to distinguish itself from its single-separator counterpart dispatch.

Examples

Splitting text using multiple separators:

julia
text = "Paragraph 1\n\nParagraph 2. Sentence 1. Sentence 2.\nParagraph 3"
 separators = ["\n\n", ". ", "\n"] # split by paragraphs, sentences, and newlines (not by words)
 chunks = recursive_splitter(text, separators, max_length=20)

Splitting text using multiple separators - with splitting on words:

julia
text = "Paragraph 1\n\nParagraph 2. Sentence 1. Sentence 2.\nParagraph 3"
 separators = ["\n\n", ". ", "\n", " "] # split by paragraphs, sentences, and newlines, words
 chunks = recursive_splitter(text, separators, max_length=10)

Using a single separator:

julia
text = "Hello,World," ^ 2900  # length 34900 characters
 chunks = recursive_splitter(text, [","], max_length=10000)

To achieve the same behavior as Langchain's RecursiveCharacterTextSplitter, use separators=["\n\n", "\n", " ", ""].

julia
text = "Paragraph 1\n\nParagraph 2. Sentence 1. Sentence 2.\nParagraph 3"
 separators = ["\n\n", "\n", " ", ""]
chunks = recursive_splitter(text, separators, max_length=10)

source


# PromptingTools.recursive_splitterMethod.
julia
recursive_splitter(text::String; separator::String=" ", max_length::Int=35000) -> Vector{String}

Split a given string text into chunks of a specified maximum length max_length. This is particularly useful for splitting larger documents or texts into smaller segments, suitable for models or systems with smaller context windows.

There is a method for dispatching on multiple separators, recursive_splitter(text::String, separators::Vector{String}; max_length::Int=35000) -> Vector{String} that mimics the logic of Langchain's RecursiveCharacterTextSplitter.

Arguments

  • text::String: The text to be split.

  • separator::String=" ": The separator used to split the text into minichunks. Defaults to a space character.

  • max_length::Int=35000: The maximum length of each chunk. Defaults to 35,000 characters, which should fit within a 16K context window.

Returns

Vector{String}: A vector of strings, each representing a chunk of the original text that is smaller than or equal to max_length.

Notes

  • The function ensures that each chunk is as close to max_length as possible without exceeding it.

  • If the text is empty, the function returns an empty array.

  • The separator is re-added to the text chunks after splitting, preserving the original structure of the text as closely as possible.

Examples

Splitting text with the default separator (" "):

julia
text = "Hello world. How are you?"
 chunks = recursive_splitter(text; max_length=13)
 length(chunks) # Output: 2

Using a custom separator and custom max_length

julia
text = "Hello,World," ^ 2900 # length 34900 chars
 chunks = recursive_splitter(text; separator=",", max_length=10000) # for 4K context window
 length(chunks) # Output: 4

source


# PromptingTools.register_model!Function.
julia
register_model!(registry = MODEL_REGISTRY;
     name::String,
     schema::Union{AbstractPromptSchema, Nothing} = nothing,
     cost_of_token_prompt::Float64 = 0.0,
     cost_of_token_generation::Float64 = 0.0,
    description::String = "")

Register a new AI model with name and its associated schema.

Registering a model helps with calculating the costs and automatically selecting the right prompt schema.

Arguments

  • name: The name of the model. This is the name that will be used to refer to the model in the ai* functions.

  • schema: The schema of the model. This is the schema that will be used to generate prompts for the model, eg, OpenAISchema().

  • cost_of_token_prompt: The cost of a token in the prompt for this model. This is used to calculate the cost of a prompt. Note: It is often provided online as cost per 1000 tokens, so make sure to convert it correctly!

  • cost_of_token_generation: The cost of a token generated by this model. This is used to calculate the cost of a generation. Note: It is often provided online as cost per 1000 tokens, so make sure to convert it correctly!

  • description: A description of the model. This is used to provide more information about the model when it is queried.
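A hedged sketch of registering a custom OpenAI-compatible model (the model name and per-token costs below are made-up placeholders):

```julia
PromptingTools.register_model!(;
    name = "my-local-model",                 # hypothetical alias you will later pass as `model = ...`
    schema = PromptingTools.OpenAISchema(),
    cost_of_token_prompt = 1e-6,             # cost per single token, not per 1000 tokens!
    cost_of_token_generation = 2e-6,
    description = "Locally hosted OpenAI-compatible model (example entry).")
```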

source


# PromptingTools.remove_julia_promptMethod.
julia
remove_julia_prompt(s::T) where {T<:AbstractString}

If it detects a julia prompt, it removes it and all lines that do not have it (except for those that belong to the code block).

source


# PromptingTools.remove_templates!Method.
julia
    remove_templates!()

Removes all templates from TEMPLATE_STORE and TEMPLATE_METADATA.

source


# PromptingTools.remove_unsafe_linesMethod.

Iterates over the lines of a string and removes those that contain a package operation or a missing import.

source


# PromptingTools.renderMethod.

Renders provided messaging template (template) under the default schema (PROMPT_SCHEMA).

source


# PromptingTools.renderMethod.
julia
render(schema::AbstractAnthropicSchema,
     messages::Vector{<:AbstractMessage};
     aiprefill::Union{Nothing, AbstractString} = nothing,
     conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],
     no_system_message::Bool = false,
     cache::Union{Nothing, Symbol} = nothing,
    kwargs...)

Builds a history of the conversation to provide the prompt to the API. All unspecified kwargs are passed as replacements such that =>value in the template.

Keyword Arguments

  • aiprefill: A string to be used as a prefill for the AI response. This steers the AI response in a certain direction (and can potentially save output tokens).

  • conversation: Past conversation to be included in the beginning of the prompt (for continued conversations).

  • no_system_message: If true, do not include the default system message in the conversation history OR convert any provided system message to a user message.

  • cache: A symbol representing the caching strategy to be used. Currently only nothing (no caching), :system, :tools,:last and :all are supported.

source


# PromptingTools.renderMethod.
julia
render(schema::AbstractAnthropicSchema,
     tools::Vector{<:AbstractTool};
    kwargs...)

Renders the tool signatures into the Anthropic format.

source


# PromptingTools.renderMethod.
julia
render(schema::AbstractGoogleSchema,
     messages::Vector{<:AbstractMessage};
     conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],
     no_system_message::Bool = false,
    kwargs...)

Builds a history of the conversation to provide the prompt to the API. All unspecified kwargs are passed as replacements such that =>value in the template.

Keyword Arguments

  • conversation: An optional vector of AbstractMessage objects representing the conversation history. If not provided, it is initialized as an empty vector.

  • no_system_message::Bool=false: If true, do not include the default system message in the conversation history OR convert any provided system message to a user message.

source


# PromptingTools.renderMethod.
julia
render(schema::AbstractOllamaManagedSchema,
     messages::Vector{<:AbstractMessage};
     conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],
    kwargs...)

Builds a history of the conversation to provide the prompt to the API. All unspecified kwargs are passed as replacements such that =>value in the template.

Note: Due to its "managed" nature, at most 2 messages can be provided (system and prompt inputs in the API).

Keyword Arguments

  • conversation: Not allowed for this schema. Provided only for compatibility.

source


# PromptingTools.renderMethod.
julia
render(schema::AbstractOllamaSchema,
     messages::Vector{<:AbstractMessage};
     conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],
     no_system_message::Bool = false,
    kwargs...)

Builds a history of the conversation to provide the prompt to the API. All unspecified kwargs are passed as replacements such that =>value in the template.

Keyword Arguments

  • conversation: An optional vector of AbstractMessage objects representing the conversation history. If not provided, it is initialized as an empty vector.

  • no_system_message: If true, do not include the default system message in the conversation history OR convert any provided system message to a user message.

source


# PromptingTools.renderMethod.
julia
render(schema::AbstractOpenAISchema,
     messages::Vector{<:AbstractMessage};
     image_detail::AbstractString = "auto",
     conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],
     no_system_message::Bool = false,
     name_user::Union{Nothing, String} = nothing,
    kwargs...)

Builds a history of the conversation to provide the prompt to the API. All unspecified kwargs are passed as replacements such that =>value in the template.

Keyword Arguments

  • image_detail: Only for UserMessageWithImages. It represents the level of detail to include for images. Can be "auto", "high", or "low".

  • conversation: An optional vector of AbstractMessage objects representing the conversation history. If not provided, it is initialized as an empty vector.

  • no_system_message: If true, do not include the default system message in the conversation history OR convert any provided system message to a user message.

  • name_user: No-op for consistency.

source


# PromptingTools.renderMethod.
julia
render(schema::AbstractOpenAISchema,
     tools::Vector{<:AbstractTool};
     json_mode::Union{Nothing, Bool} = nothing,
    kwargs...)

Renders the tool signatures into the OpenAI format.

source


# PromptingTools.renderMethod.
julia
render(tracer_schema::AbstractTracerSchema,
    conv::AbstractVector{<:AbstractMessage}; kwargs...)

Passthrough. No changes.

source


# PromptingTools.renderMethod.
julia
render(schema::NoSchema,
     messages::Vector{<:AbstractMessage};
     conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],
     no_system_message::Bool = false,
    replacement_kwargs...)

Renders a conversation history from a vector of messages with all replacement variables specified in replacement_kwargs.

It is the first pass of the prompt rendering system, and is used by all other schemas.

Keyword Arguments

  • image_detail: Only for UserMessageWithImages. It represents the level of detail to include for images. Can be "auto", "high", or "low".

  • conversation: An optional vector of AbstractMessage objects representing the conversation history. If not provided, it is initialized as an empty vector.

  • no_system_message: If true, do not include the default system message in the conversation history OR convert any provided system message to a user message.

Notes

  • All unspecified kwargs are passed as replacements such that =>value in the template.

  • If a SystemMessage is missing, we inject a default one at the beginning of the conversation.

  • Only one SystemMessage is allowed (ie, you cannot mix two conversations with different system prompts).
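A small sketch of this first templating pass (the double-curly placeholder syntax follows the standard PromptingTools templating convention; the exact rendered output is not asserted here):

```julia
messages = [PromptingTools.SystemMessage("You are a {{role}}."),
    PromptingTools.UserMessage("Say hi to {{name}}.")]
rendered = PromptingTools.render(PromptingTools.NoSchema(), messages; role = "poet", name = "Julia")
```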

source


# PromptingTools.replace_wordsMethod.
julia
replace_words(text::AbstractString, words::Vector{<:AbstractString}; replacement::AbstractString="ABC")

Replace all occurrences of words in words with replacement in text. Useful to quickly remove specific names or entities from a text.

Arguments

  • text::AbstractString: The text to be processed.

  • words::Vector{<:AbstractString}: A vector of words to be replaced.

  • replacement::AbstractString="ABC": The replacement string to be used. Defaults to "ABC".

Example

julia
text = "Disney is a great company"
 replace_words(text, ["Disney", "Snow White", "Mickey Mouse"])
# Output: "ABC is a great company"

source


# PromptingTools.resize_conversation!Method.
julia
resize_conversation!(conv_history, max_history::Union{Int, Nothing})

Resize the conversation history to a specified maximum length.

This function trims the conv_history to ensure that its size does not exceed max_history. It removes the oldest conversations first if the length of conv_history is greater than max_history.

Arguments

  • conv_history: A vector that stores the history of conversations. Typically, this is PT.CONV_HISTORY.

  • max_history: The maximum number of conversations to retain in the history. If Nothing, the history is not resized.

Returns

The resized conversation history.

Example

julia
resize_conversation!(PT.CONV_HISTORY, PT.MAX_HISTORY_LENGTH)

After the function call, conv_history will contain only the 10 most recent conversations.

This is done automatically by the ai"" macros.

source


# PromptingTools.response_to_messageMethod.
julia
response_to_message(schema::AbstractOpenAISchema,
     MSG::Type{AIMessage},
     choice,
     resp;
@@ -972,8 +972,8 @@
     time::Float64 = 0.0,
     run_id::Int = Int(rand(Int32)),
     sample_id::Union{Nothing, Integer} = nothing,
    name_assistant::Union{Nothing, String} = nothing)

Utility to facilitate unwrapping of HTTP response to a message type MSG provided for OpenAI-like responses

Note: Extracts finish_reason and log_prob if available in the response.

Arguments

  • schema::AbstractOpenAISchema: The schema for the prompt.

  • MSG::Type{AIMessage}: The message type to be returned.

  • choice: The choice from the response (eg, one of the completions).

  • resp: The response from the OpenAI API.

  • model_id::AbstractString: The model ID to use for generating the response. Defaults to an empty string.

  • time::Float64: The elapsed time for the response. Defaults to 0.0.

  • run_id::Integer: The run ID for the response. Defaults to a random integer.

  • sample_id::Union{Nothing, Integer}: The sample ID for the response (if there are multiple completions). Defaults to nothing.

  • name_assistant::Union{Nothing, String}: The name to use for the assistant in the conversation history. Defaults to nothing.

source


# PromptingTools.response_to_messageMethod.

Utility to facilitate unwrapping of HTTP response to a message type MSG provided. Designed to handle multi-sample completions.

source


# PromptingTools.save_conversationMethod.
julia
save_conversation(io_or_file::Union{IO, AbstractString},
    messages::AbstractVector{<:AbstractMessage})

Saves provided conversation (messages) to io_or_file. If you need to add some metadata, see save_template.
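For example (a sketch; the file name is arbitrary, and load_conversation is the documented counterpart for reading it back):

```julia
conv = [PromptingTools.SystemMessage("You are helpful."),
    PromptingTools.UserMessage("Hi!"),
    PromptingTools.AIMessage("Hello! How can I help?")]
PromptingTools.save_conversation("conversation.json", conv)
conv2 = PromptingTools.load_conversation("conversation.json")
```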

source


# PromptingTools.save_conversationsMethod.
julia
save_conversations(schema::AbstractPromptSchema, filename::AbstractString,
     conversations::Vector{<:AbstractVector{<:PT.AbstractMessage}})

Saves provided conversations (vector of vectors of messages) to filename rendered in the particular schema.

Commonly used for finetuning models with schema = ShareGPTSchema()

The format is JSON Lines, where each line is a JSON object representing one provided conversation.

See also: save_conversation

Examples

You must always provide a VECTOR of conversations

julia
messages = AbstractMessage[SystemMessage("System message 1"),
     UserMessage("User message"),
     AIMessage("AI message")]
@@ -984,12 +984,12 @@
 save_conversations(fn, conversation)
 
 # Content of the file (one line for each conversation)
# {"conversations":[{"value":"System message 1","from":"system"},{"value":"User message","from":"human"},{"value":"AI message","from":"gpt"}]}

source


# PromptingTools.save_templateMethod.
julia
save_template(io_or_file::Union{IO, AbstractString},
     messages::AbstractVector{<:AbstractChatMessage};
     content::AbstractString = "Template Metadata",
     description::AbstractString = "",
     version::AbstractString = "1",
    source::AbstractString = "")

Saves provided messaging template (messages) to io_or_file. Automatically adds metadata based on provided keyword arguments.
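For example (a sketch; the file name and metadata values are placeholders):

```julia
template = [PromptingTools.SystemMessage("You are a world-class assistant."),
    PromptingTools.UserMessage("{{ask}}")]
PromptingTools.save_template("my_template.json", template;
    description = "Generic assistant template with an `ask` placeholder", version = "1")
```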

source


# PromptingTools.set_preferences!Method.
julia
set_preferences!(pairs::Pair{String, <:Any}...)

Set preferences for PromptingTools. See ?PREFERENCES for more information.

See also: get_preferences

Example

Change your API key and default model:

julia
PromptingTools.set_preferences!("OPENAI_API_KEY" => "key1", "MODEL_CHAT" => "chat1")

source


# PromptingTools.set_properties_strict!Method.
julia
set_properties_strict!(properties::AbstractDict)

Sets strict mode for the properties of a JSON schema.

Changes:

  • Sets additionalProperties to false.

  • All keys must be included in required.

  • All optional keys will have null added to their type.

Reference: https://platform.openai.com/docs/guides/structured-outputs/supported-schemas

source


# PromptingTools.streamed_request!Method.
julia
streamed_request!(cb::AbstractStreamCallback, url, headers, input; kwargs...)

End-to-end wrapper for POST streaming requests. In-place modification of the callback object (cb.chunks) with the results of the request being returned. We build the body of the response object in the end and write it into the resp.body.

Returns the response object.

Arguments

  • cb: The callback object.

  • url: The URL to send the request to.

  • headers: The headers to send with the request.

  • input: A buffer with the request body.

  • kwargs: Additional keyword arguments.
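You will rarely call streamed_request! directly; it is driven by the higher-level ai* calls when a streaming callback is supplied. A hedged sketch of that entry point (assumes streaming support in your installed version):

```julia
# Streams the partial response to stdout while the request is in flight.
msg = aigenerate("Count from 1 to 5, one number per line."; streamcallback = stdout)
```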

source


# PromptingTools.tool_call_signatureMethod.
julia
tool_call_signature(
     type_or_method::Union{Type, Method}; strict::Union{Nothing, Bool} = nothing,
     max_description_length::Int = 200, name::Union{Nothing, String} = nothing,
     docs::Union{Nothing, String} = nothing)

Extract the argument names, types and docstrings from a struct to create the function call signature in JSON schema.

You must provide a Struct type (not an instance of it) with some fields.

Note: Fairly experimental, but works for combination of structs, arrays, strings and singletons.

Arguments

  • type_or_method::Union{Type, Method}: The struct type or method to extract the signature from.

  • strict::Union{Nothing, Bool}: Whether to enforce strict mode for the schema. Defaults to nothing.

  • max_description_length::Int: Maximum length for descriptions. Defaults to 200.

  • name::Union{Nothing, String}: The name of the tool. Defaults to the name of the struct.

  • docs::Union{Nothing, String}: The description of the tool. Defaults to the docstring of the struct/overall function.

Returns

  • Dict{String, Any}: A dictionary representing the function call signature schema.

Tips

  • You can improve the quality of the extraction by writing a helpful docstring for your struct (or any nested struct). It will be provided as a description.

You can even include comments/descriptions about the individual fields.

  • All fields are assumed to be required, unless you allow null values (eg, ::Union{Nothing, Int}). Fields with Nothing will be treated as optional.

  • Missing values are ignored (eg, ::Union{Missing, Int} will be treated as Int). It's for broader compatibility and we cannot deserialize it as easily as Nothing.

Example

Do you want to extract some specific measurements from a text like age, weight and height? You need to define the information you need as a struct (return_type):

struct MyMeasurement
@@ -1012,7 +1012,7 @@
 
 Or if you want your extraction to fail gracefully when data isn't found, use `MaybeExtract{T}` wrapper (inspired by Instructor package!):

using PromptingTools: MaybeExtract

type = MaybeExtract

Effectively the same as:

struct MaybeExtract{T}

result::Union{T, Nothing}

error::Bool // true if no result was found (extraction failed), false otherwise

message::Union{Nothing, String} // Only present if no result is found, should be short and concise

end

If LLM extraction fails, it will return a Dict with error and message fields instead of the result!

msg = aiextract("Extract measurements from the text: I am giraffe"; return_type = type)


Dict{Symbol, Any} with 2 entries:

:message => "Sorry, this feature is only available for humans."

:error => true

That

 
-[source](https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/extraction.jl#L341-L424)
+[source](https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/extraction.jl#L341-L424)
 
 </div>
 <br>
@@ -1026,7 +1026,7 @@
 tool_call_signature(fields::Vector;
     strict::Union{Nothing, Bool} = nothing, max_description_length::Int = 200, name::Union{
         Nothing, String} = nothing,
    docs::Union{Nothing, String} = nothing)

Generate a function call signature schema for a dynamically generated struct based on the provided fields.

Arguments

  • fields::Vector{Union{Symbol, Pair{Symbol, Type}, Pair{Symbol, String}}}: A vector of field names or pairs of field name and type or string description, eg, [:field1, :field2, :field3] or [:field1 => String, :field2 => Int, :field3 => Float64] or [:field1 => String, :field1__description => "Field 1 has the name"].

  • strict::Union{Nothing, Bool}: Whether to enforce strict mode for the schema. Defaults to nothing.

  • max_description_length::Int: Maximum length for descriptions. Defaults to 200.

  • name::Union{Nothing, String}: The name of the tool. Defaults to the name of the struct.

  • docs::Union{Nothing, String}: The description of the tool. Defaults to the docstring of the struct/overall function.

Returns a tool_map with the tool name as the key and the tool object as the value.

See also generate_struct, aiextract, update_field_descriptions!.

Examples

julia
tool_map = tool_call_signature([:field1, :field2, :field3])

With the field types:

julia
tool_map = tool_call_signature([:field1 => String, :field2 => Int, :field3 => Float64])

And with the field descriptions:

julia
tool_map = tool_call_signature([:field1 => String, :field1__description => "Field 1 has the name"])

source


# PromptingTools.unique_permutationMethod.
julia
unique_permutation(inputs::AbstractVector)

Returns indices of unique items in a vector inputs. Access the unique values as inputs[unique_permutation(inputs)].
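For example (a sketch; the returned indices are assumed to point at the first occurrence of each unique value, preserving order):

```julia
inputs = ["a", "b", "a", "c", "b"]
idxs = PromptingTools.unique_permutation(inputs)   # expected: [1, 2, 4]
inputs[idxs]                                       # expected: ["a", "b", "c"]
```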

source


# PromptingTools.unwrapMethod.

Unwraps the tracer message or tracer-like object, returning the original object.

source


# PromptingTools.update_field_descriptions!Method.
julia
update_field_descriptions!(
     parameters::Dict{String, <:Any}, descriptions::Dict{Symbol, <:AbstractString};
     max_description_length::Int = 200)

Update the given JSON schema with descriptions from the descriptions dictionary. This function modifies the schema in-place, adding a "description" field to each property that has a corresponding entry in the descriptions dictionary.

Note: It modifies the schema in place. Only the top-level "properties" are updated!

Returns: The modified schema dictionary.

Arguments

  • parameters: A dictionary representing the JSON schema to be updated.

  • descriptions: A dictionary mapping field names (as symbols) to their descriptions.

  • max_description_length::Int: Maximum length for descriptions. Defaults to 200.

Examples

julia
    parameters = Dict{String, Any}(
         "properties" => Dict{String, Any}(
@@ -1041,9 +1041,9 @@
         :temperature => "Temperature in degrees Fahrenheit",
         :condition => "Current weather condition (e.g., sunny, rainy, cloudy)"
     )
-    update_field_descriptions!(parameters, descriptions)

source


# PromptingTools.wrap_stringFunction.
julia
wrap_string(str::String,
     text_width::Int = 20;
-    newline::Union{AbstractString, AbstractChar} = '\n')

Breaks a string into lines of a given text_width. Optionally, you can specify the newline character or string to use.

Example:

julia
wrap_string("Certainly, here's a function in Julia that will wrap a string according to the specifications:", 10) |> print

source


# PromptingTools.@aai_strMacro.
julia
aai"user_prompt"[model_alias] -> AIMessage

Asynchronous version of @ai_str macro, which will log the result once it's ready.

See also aai!"" if you want an asynchronous reply to the provided message / continue the conversation.

Example

Send an asynchronous request to GPT-4 so that we don't have to wait for the response. This is very practical with slow models, because you can keep working in the meantime.

julia

 **...with some delay...**
 
 **[ Info: Tokens: 29 @ Cost: 0.0011
@@ -1052,7 +1052,7 @@
 **[ Info: AIMessage> Hello! How can I assist you today?**
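For reference, a call producing output like the above might look as follows (the prompt and model alias are illustrative):

julia
aai"Say hi but slowly!"gpt4t
# ... keep working; the result is logged once it arrives.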
 
 
-[source](https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/macros.jl#L99-L116)
+[source](https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/macros.jl#L99-L116)
 
 </div>
 <br>
@@ -1070,16 +1070,16 @@
 
 # continue the conversation (notice that you can change the model, eg, to a more powerful one for a better answer)
 ai!"What do you think about that?"gpt4t
-# AIMessage("Considering our previous discussion, I think that...")

Usage Notes

  • This macro should be used when you want to maintain the context of an ongoing conversation (ie, the last ai"" message).

  • It automatically accesses and updates the global conversation history.

  • If no conversation history is found, it raises an assertion error, suggesting to initiate a new conversation using ai"" instead.

Important

Ensure that the conversation history does not grow too long, so that the AI's responses stay relevant and coherent. The history length is managed by MAX_HISTORY_LENGTH.
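A minimal sketch of a two-turn exchange (the prompts are illustrative):

julia
# Start a new conversation ...
ai"What is the capital of France?"
# ... then follow up on the same thread; the global history is reused.
ai!"And roughly how many people live there?"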

source


# PromptingTools.@ai_strMacro.
julia
ai"user_prompt"[model_alias] -> AIMessage

The ai"" string macro generates an AI response to a given prompt by using aigenerate under the hood.

See also ai!"" if you want to reply to the provided message / continue the conversation.

Arguments

  • user_prompt (String): The input prompt for the AI model.

  • model_alias (optional, any): Provide model alias of the AI model (see MODEL_ALIASES).

Returns

AIMessage corresponding to the input prompt.

Example

julia
result = ai"Hello, how are you?"
+# AIMessage("Considering our previous discussion, I think that...")

Usage Notes

  • This macro should be used when you want to maintain the context of an ongoing conversation (ie, the last ai"" message).

  • It automatically accesses and updates the global conversation history.

  • If no conversation history is found, it raises an assertion error, suggesting to initiate a new conversation using ai"" instead.

Important

Ensure that the conversation history is not too long to maintain relevancy and coherence in the AI's responses. The history length is managed by MAX_HISTORY_LENGTH.

source


# PromptingTools.@ai_strMacro.
julia
ai"user_prompt"[model_alias] -> AIMessage

The ai"" string macro generates an AI response to a given prompt by using aigenerate under the hood.

See also ai!"" if you want to reply to the provided message / continue the conversation.

Arguments

  • user_prompt (String): The input prompt for the AI model.

  • model_alias (optional, any): Provide model alias of the AI model (see MODEL_ALIASES).

Returns

AIMessage corresponding to the input prompt.

Example

julia
result = ai"Hello, how are you?"
 # AIMessage("Hello! I'm an AI assistant, so I don't have feelings, but I'm here to help you. How can I assist you today?")

If you want to interpolate some variables or additional context, simply use string interpolation:

julia
a=1
 result = ai"What is `$a+$a`?"
 # AIMessage("The sum of `1+1` is `2`.")

If you want to use a different model, eg, GPT-4, you can provide its alias as a flag:

julia
result = ai"What is `1.23 * 100 + 1`?"gpt4t
-# AIMessage("The answer is 124.")

source


# PromptingTools.@timeoutMacro.
julia
@timeout(seconds, expr_to_run, expr_when_fails)

Simple macro to run an expression with a timeout of seconds. If the expr_to_run fails to finish in seconds seconds, expr_when_fails is returned.

Example

julia
x = @timeout 1 begin
+# AIMessage("The answer is 124.")

source


# PromptingTools.@timeoutMacro.
julia
@timeout(seconds, expr_to_run, expr_when_fails)

Simple macro to run an expression with a timeout of seconds. If the expr_to_run fails to finish in seconds seconds, expr_when_fails is returned.

Example

julia
x = @timeout 1 begin
     sleep(1.1)
     println("done")
     1
-end "failed"
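In this example, x should end up as "failed", because sleep(1.1) does not finish within the 1-second limit.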

source


+ \ No newline at end of file diff --git a/previews/PR218/reference_agenttools.html b/previews/PR218/reference_agenttools.html index 192fa2d8e..b48f2a948 100644 --- a/previews/PR218/reference_agenttools.html +++ b/previews/PR218/reference_agenttools.html @@ -8,16 +8,16 @@ - + - + - + -
Skip to content

Reference for AgentTools

# PromptingTools.Experimental.AgentToolsModule.
julia
AgentTools

Provides agentic functionality: lazy calls for building pipelines (eg, AIGenerate) and AICodeFixer.

This module is experimental and may change at any time. It is intended to be moved to a separate package in the future.

source


# PromptingTools.Experimental.AgentTools.AICallType.
julia
AICall(func::F, args...; kwargs...) where {F<:Function}
 
 AIGenerate(args...; kwargs...)
 AIEmbed(args...; kwargs...)
@@ -32,7 +32,7 @@
 aicall = AIGenerate(:JuliaExpertAsk; ask="xyz", model="abc", api_kwargs=(; temperature=0.1))

Trigger the AICall with run! (it returns the updated AICall struct back):

julia
aicall |> run!
 ````
 
-You can also use `AICall` as a functor to trigger the AI call with a `UserMessage` or simply the text to send:

julia
aicall(UserMessage("Hello, world!")) # Triggers the lazy call
result = run!(aicall) # Explicitly runs the AI call

This can be used to "reply" to a previous message / continue the stored conversation.

Notes

  • The AICall struct is a key component in building flexible and efficient Agentic pipelines

  • The lazy evaluation model allows for setting up the call parameters in advance and deferring the actual execution until it is explicitly triggered.

  • This struct is particularly useful in scenarios where the timing of AI function execution needs to be deferred or where multiple potential calls need to be prepared and selectively executed.
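A minimal end-to-end sketch of the lazy pattern (the prompt and model alias are illustrative):

julia
using PromptingTools.Experimental.AgentTools

aicall = AIGenerate("Say hi and nothing else."; model = "gpt4t")
run!(aicall)              # triggers the actual LLM call
last_output(aicall)       # inspect the latest response
aicall("Now say bye.")    # functor form: reply and run again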

source


# PromptingTools.Experimental.AgentTools.AICodeFixerType.
julia
AICodeFixer(aicall::AICall, templates::Vector{<:PT.UserMessage}; num_rounds::Int = 3, feedback_func::Function = aicodefixer_feedback; kwargs...)
 AICodeFixer(aicall::AICall, template::Union{AITemplate, Symbol} = :CodeFixerRCI; kwargs...)

An AIAgent that iteratively evaluates any received Julia code and provides feedback back to the AI model if num_rounds>0. AICodeFixer manages the lifecycle of a code fixing session, including tracking conversation history, rounds of interaction, and applying user feedback through a specialized feedback function.

It integrates with lazy AI call structures like AIGenerate.

The operation is "lazy", ie, the agent is only executed when needed, eg, when run! is called.

Fields

  • call::AICall: The AI call that is being used for code generation or processing, eg, AIGenerate (same as aigenerate but "lazy", ie, called only when needed).

  • templates::Union{Symbol, AITemplate, Vector{PT.UserMessage}}: A set of user messages or templates that guide the AI's code fixing process. The first UserMessage is used in the first round of code fixing, the second UserMessage is used for every subsequent iteration.

  • num_rounds::Int: The number of rounds for the code fixing session. Defaults to 3.

  • round_counter::Int: Counter to track the current round of interaction.

  • feedback_func::Function: Function to generate feedback based on the AI's proposed code, defaults to aicodefixer_feedback (modular thanks to type dispatch on AbstractOutcomes)

  • kwargs::NamedTuple: Additional keyword arguments for customizing the AI call.

Note: Any kwargs provided to run!() will be passed to the underlying AICall.

Example

Let's create an AIGenerate call and then pipe it to AICodeFixer to run a few rounds of the coding fixing:

julia
# Create an AIGenerate call
 lazy_call = AIGenerate("Write a function to do XYZ...")
 
@@ -54,7 +54,7 @@
 Feedback: {{feedback}}")]; num_rounds = 2) |> run!
 
 # The result now contains the AI's attempts to fix the code
-preview(result.call.conversation)

Notes

  • AICodeFixer is particularly useful when code is hard to get right in one shot (eg, smaller models, complex syntax)

  • The structure leverages the lazy evaluation model of AICall (/AIGenerate) to efficiently manage AI interactions and be able to repeatedly call it.

  • The run! function executes the AI call and applies the feedback loop for the specified number of rounds, enabling an interactive code fixing process.

source


# PromptingTools.Experimental.AgentTools.RetryConfigType.
julia
RetryConfig

Configuration for self-fixing the AI calls. It includes the following fields:

Fields

  • retries::Int: The number of retries ("fixing rounds") that have been attempted so far.

  • calls::Int: The total number of SUCCESSFULLY generated ai* function calls made so far (across all samples/retry rounds). Ie, if a call fails because of an API error, it is not counted, because it didn't reach the LLM.

  • max_retries::Int: The maximum number of retries ("fixing rounds") allowed for the AI call. Defaults to 10.

  • max_calls::Int: The maximum number of ai* function calls allowed for the AI call. Defaults to 99.

  • retry_delay::Int: The delay (in seconds) between retry rounds. Defaults to 0s.

  • n_samples::Int: The number of samples to generate in each ai* call round (to increase the chances of a successful pass). Defaults to 1.

  • scoring::AbstractScoringMethod: The scoring method to use for generating multiple samples. Defaults to UCT(sqrt(2)).

  • ordering::Symbol: The ordering to use for selecting the best samples. With :PostOrderDFS we prioritize leaves, with :PreOrderDFS we prioritize the root. Defaults to :PostOrderDFS.

  • feedback_inplace::Bool: Whether to provide feedback in previous UserMessage (and remove the past AIMessage) or to create a new UserMessage. Defaults to false.

  • feedback_template::Symbol: Template to use for feedback in place. Defaults to :FeedbackFromEvaluator.

  • temperature::Float64: The temperature to use for sampling. Relevant only if not defined in api_kwargs provided. Defaults to 0.7.

  • catch_errors::Bool: Whether to catch errors during run! of AICall. Saves them in aicall.error. Defaults to false.
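A minimal sketch of customizing the retry behaviour (the values are illustrative):

julia
cfg = RetryConfig(; max_retries = 3, n_samples = 2, feedback_inplace = true)
aicall = AIGenerate("Write a two-line haiku about Julia."; config = cfg)
run!(aicall)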

source


# PromptingTools.Experimental.AgentTools.SampleNodeType.
julia
SampleNode{T}

A node in the Monte Carlo Tree Search tree.

It's used to hold the data we're trying to optimize/discover (eg, a conversation), the scores from evaluation (wins, visits) and the results of the evaluations upon failure (feedback).

Fields

  • id::UInt16: Unique identifier for the node

  • parent::Union{SampleNode, Nothing}: Parent node that current node was built on

  • children::Vector{SampleNode}: Children nodes

  • wins::Int: Number of successful outcomes

  • visits::Int: Number of condition checks done (eg, losses are checks - wins)

  • data::T: eg, the conversation or some parameter to be optimized

  • feedback::String: Feedback from the evaluation, always a string! Defaults to empty string.

  • success::Union{Nothing, Bool}: Success of the generation and subsequent evaluations, proxy for whether it should be further evaluated. Defaults to nothing.
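A small sketch of building and inspecting a tree by hand (the data values are illustrative):

julia
root = SampleNode(; data = PT.AbstractMessage[])
child = expand!(root, PT.AbstractMessage[]; success = true)
print_samples(root)    # pretty-print the tree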

source


# PromptingTools.Experimental.AgentTools.ThompsonSamplingType.
julia
ThompsonSampling <: AbstractScoringMethod

Implements scoring and selection for Thompson Sampling method. See https://en.wikipedia.org/wiki/Thompson_sampling for more details.

source


# PromptingTools.Experimental.AgentTools.UCTType.
julia
UCT <: AbstractScoringMethod

Implements scoring and selection for UCT (Upper Confidence Bound for Trees) sampling method. See https://en.wikipedia.org/wiki/Monte_Carlo_tree_search#Exploration_and_exploitation for more details.

source


# PromptingTools.Experimental.AgentTools.AIClassifyMethod.
julia
AIClassify(args...; kwargs...)

Creates a lazy instance of aiclassify. It is an instance of AICall with aiclassify as the function.

Use exactly the same arguments and keyword arguments as aiclassify (see ?aiclassify for details).

source


# PromptingTools.Experimental.AgentTools.AIEmbedMethod.
julia
AIEmbed(args...; kwargs...)

Creates a lazy instance of aiembed. It is an instance of AICall with aiembed as the function.

Use exactly the same arguments and keyword arguments as aiembed (see ?aiembed for details).

source


# PromptingTools.Experimental.AgentTools.AIExtractMethod.
julia
AIExtract(args...; kwargs...)

Creates a lazy instance of aiextract. It is an instance of AICall with aiextract as the function.

Use exactly the same arguments and keyword arguments as aiextract (see ?aiextract for details).

source


# PromptingTools.Experimental.AgentTools.AIGenerateMethod.
julia
AIGenerate(args...; kwargs...)

Creates a lazy instance of aigenerate. It is an instance of AICall with aigenerate as the function.

Use exactly the same arguments and keyword arguments as aigenerate (see ?aigenerate for details).

source


# PromptingTools.Experimental.AgentTools.AIScanMethod.
julia
AIScan(args...; kwargs...)

Creates a lazy instance of aiscan. It is an instance of AICall with aiscan as the function.

Use exactly the same arguments and keyword arguments as aiscan (see ?aiscan for details).

source


# PromptingTools.Experimental.AgentTools.add_feedback!Method.
julia
add_feedback!(
     conversation::AbstractVector{<:PT.AbstractMessage}, sample::SampleNode; feedback_inplace::Bool = false,
     feedback_template::Symbol = :FeedbackFromEvaluator)

Adds formatted feedback to the conversation based on the sample node feedback (and its ancestors).

Arguments

  • conversation::AbstractVector{<:PT.AbstractMessage}: The conversation to add the feedback to.

  • sample::SampleNode: The sample node to extract the feedback from.

  • feedback_inplace::Bool=false: If true, it will add the feedback to the last user message inplace (and pop the last AIMessage). Otherwise, it will append the feedback as a new message.

  • feedback_template::Symbol=:FeedbackFromEvaluator: The template to use for the feedback message. It must be a valid AITemplate name.

Example

julia
sample = SampleNode(; data = nothing, feedback = "Feedback X")
 conversation = [PT.UserMessage("I say hi!"), PT.AIMessage(; content = "I say hi!")]
@@ -62,14 +62,14 @@
 conversation[end].content == "### Feedback from Evaluator\nFeedback X\n"
 
 Inplace feedback:

julia
conversation = [PT.UserMessage("I say hi!"), PT.AIMessage(; content = "I say hi!")]
conversation = AT.add_feedback!(conversation, sample; feedback_inplace = true)
conversation[end].content == "I say hi!\n\n### Feedback from Evaluator\nFeedback X\n"


-Sample with ancestors with feedback:

julia
sample_p = SampleNode(; data = nothing, feedback = "\nFeedback X")
sample = expand!(sample_p, nothing)
sample.feedback = "\nFeedback Y"
conversation = [PT.UserMessage("I say hi!"), PT.AIMessage(; content = "I say hi!")]
conversation = AT.add_feedback!(conversation, sample)

conversation[end].content == "### Feedback from Evaluator\n\nFeedback X\n–––––\n\nFeedback Y\n"

source


# PromptingTools.Experimental.AgentTools.aicodefixer_feedbackMethod.
julia
aicodefixer_feedback(cb::AICode; max_length::Int = 512) -> NamedTuple(; feedback::String)
 aicodefixer_feedback(conversation::AbstractVector{<:PT.AbstractMessage}; max_length::Int = 512) -> NamedTuple(; feedback::String)
 aicodefixer_feedback(msg::PT.AIMessage; max_length::Int = 512) -> NamedTuple(; feedback::String)
 aicodefixer_feedback(aicall::AICall; max_length::Int = 512) -> NamedTuple(; feedback::String)

Generate feedback for an AI code fixing session based on the AICode block /or conversation history (that will be used to extract and evaluate a code block). Function is designed to be extensible for different types of feedback and code evaluation outcomes.

The high-level wrapper accepts a conversation and returns new kwargs for the AICall.

Individual feedback functions are dispatched on different subtypes of AbstractCodeOutcome and can be extended/overwritten to provide more detailed feedback.

See also: AIGenerate, AICodeFixer

Arguments

  • cb::AICode: AICode block to evaluate and provide feedback on.

  • max_length::Int=512: An optional argument that specifies the maximum length of the feedback message.

Returns

  • NamedTuple: A feedback message as a kwarg in NamedTuple based on the analysis of the code provided in the conversation.

Example

julia
cb = AICode(msg; skip_unsafe = true, capture_stdout = true)
 new_kwargs = aicodefixer_feedback(cb)
 
 new_kwargs = aicodefixer_feedback(msg)
-new_kwargs = aicodefixer_feedback(conversation)

Notes

This function is part of the AI code fixing system, intended to interact with code in AIMessage and provide feedback on improving it.

The high-level wrapper accepts a conversation and returns new kwargs for the AICall.

It dispatches for the code feedback based on the subtypes of AbstractCodeOutcome below:

  • CodeEmpty: No code found in the message.

  • CodeFailedParse: Code parsing error.

  • CodeFailedEval: Runtime evaluation error.

  • CodeFailedTimeout: Code execution timed out.

  • CodeSuccess: Successful code execution.

You can override the individual methods to customize the feedback.

source


# PromptingTools.Experimental.AgentTools.airetry!Function.
julia
airetry!(
     f_cond::Function, aicall::AICallBlock, feedback::Union{AbstractString, Function} = "";
     verbose::Bool = true, throw::Bool = false, evaluate_all::Bool = true, feedback_expensive::Bool = false,
     max_retries::Union{Nothing, Int} = nothing, retry_delay::Union{Nothing, Int} = nothing)

Evaluates the condition f_cond on the aicall object. If the condition is not met, it will return the best sample to retry from and provide feedback (string or function) to aicall. That's why it's mutating. It will retry maximum max_retries times, with throw=true, an error will be thrown if the condition is not met after max_retries retries.

Note: aicall must be run first via run!(aicall) before calling airetry!.

Function signatures

  • f_cond(aicall::AICallBlock) -> Bool, ie, it must accept the aicall object and return a boolean value.

  • feedback can be a string or feedback(aicall::AICallBlock) -> String, ie, it must accept the aicall object and return a string.

You can leverage the last_message, last_output, and AICode functions to access the last message, last output and execute code blocks in the conversation, respectively. See examples below.
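For instance, a minimal sketch of a format check with a retry hint (the prompt, condition and feedback are illustrative):

julia
aicall = AIGenerate("Return exactly one word: the color of a clear sky.";
    config = RetryConfig(; max_retries = 2))
run!(aicall)
airetry!(x -> length(split(strip(last_output(x)))) == 1, aicall,
    "Reply with exactly one word.")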

Good Use Cases

  • Retry with API failures/drops (add retry_delay=2 to wait 2s between retries)

  • Check the output format / type / length / etc

  • Check the output with aiclassify call (LLM Judge) to catch unsafe/NSFW/out-of-scope content

  • Provide hints to the model to guide it to the correct answer

Gotchas

  • If controlling keyword arguments are set to nothing, they will fall back to the default values in aicall.config. You can override them by passing the keyword arguments explicitly.

  • If there are multiple airetry! checks, they are evaluated sequentially. As long as throw==false, they will all be evaluated even if they failed previous checks.

  • Only samples which passed previous evaluations are evaluated (sample.success is true). If there are no successful samples, the function will evaluate only the active sample (aicall.active_sample_id) and nothing else.

  • Feedback from all "ancestor" evaluations is added upon retry, not feedback from the "siblings" or other branches. To have only ONE long BRANCH (no siblings), make sure to keep RetryConfig(; n_samples=1). That way the model will always see ALL previous feedback.

  • We implement a version of Monte Carlo Tree Search (MCTS) to always pick the most promising sample to restart from (you can tweak the options in RetryConfig to change the behaviour).

  • For large number of parallel branches (ie, "shallow and wide trees"), you might benefit from switching scoring to scoring=ThompsonSampling() (similar to how Bandit algorithms work).

  • Open-source/local models can struggle with too long conversation, you might want to experiment with in-place feedback (set RetryConfig(; feedback_inplace=true)).

Arguments

  • f_cond::Function: A function that accepts the aicall object and returns a boolean value. Retry will be attempted if the condition is not met (f_cond -> false).

  • aicall::AICallBlock: The aicall object to evaluate the condition on.

  • feedback::Union{AbstractString, Function}: Feedback to provide if the condition is not met. If a function is provided, it must accept the aicall object as the only argument and return a string.

  • verbose::Integer=1: A verbosity level for logging the retry attempts and warnings. A higher value indicates more detailed logging.

  • throw::Bool=false: If true, it will throw an error if the function f_cond does not return true after max_retries retries.

  • evaluate_all::Bool=false: If true, it will evaluate all the "successful" samples in the aicall object. Otherwise, it will only evaluate the active sample.

  • feedback_expensive::Bool=false: If false, it will provide feedback to all samples that fail the condition. If feedback function is expensive to call (eg, another ai* function), set this to true and feedback will be provided only to the sample we will retry from.

  • max_retries::Union{Nothing, Int}=nothing: Maximum number of retries. If not provided, it will fall back to the max_retries in aicall.config.

  • retry_delay::Union{Nothing, Int}=nothing: Delay between retries in seconds. If not provided, it will fall back to the retry_delay in aicall.config.

Returns

  • The aicall object with the updated conversation, and samples (saves the evaluations and their scores/feedback).

Example

You can use airetry! to catch API errors in run! and auto-retry the call. RetryConfig is how you influence all the subsequent retry behaviours - see ?RetryConfig for more details.

julia
# API failure because of a non-existent model
@@ -232,7 +232,7 @@
 ## ID: 32991, Guess: 50
 ## ID: 32991, Guess: 35
 ## ID: 32991, Guess: 33
-## etc...

Note that if there are multiple "branches", the model will see only its own feedback and that of its ancestors, not the other "branches". If you want to provide ALL feedback, set RetryConfig(; n_samples=1) to remove any "branching". Fixing will then be done sequentially in one conversation and the model will see all feedback (less powerful if the model falls into a bad state). Alternatively, you can tweak the feedback function.

See Also

References: airetry is inspired by the Language Agent Tree Search paper and by DSPy Assertions paper.

source


# PromptingTools.Experimental.AgentTools.backpropagate!Method.

Provides scores for a given node (and all its ancestors) based on the evaluation (wins, visits).

source


# PromptingTools.Experimental.AgentTools.beta_sampleMethod.
julia
beta_sample(α::Real, β::Real)

Approximates a sample from the Beta distribution by generating two independent Gamma distributed samples and using their ratio.

source


# PromptingTools.Experimental.AgentTools.collect_all_feedbackMethod.

Collects all feedback from the node and its ancestors (parents). Returns a string separated by separator.

source


# PromptingTools.Experimental.AgentTools.error_feedbackMethod.
julia
error_feedback(e::Any; max_length::Int = 512)

Set of specialized methods to provide feedback on different types of errors (e).

source


# PromptingTools.Experimental.AgentTools.evaluate_condition!Function.
julia
evaluate_condition!(f_cond::Function, aicall::AICallBlock,
     feedback::Union{AbstractString, Function} = "";
     evaluate_all::Bool = true, feedback_expensive::Bool = false)

Evaluates the condition f_cond (must return Bool) on the aicall object. If the condition is not met, it will return the best sample to retry from and provide feedback.

Mutating as the results are saved in aicall.samples

If evaluate_all is true, it will evaluate all the "successful" samples in the aicall object. Otherwise, it will only evaluate the active sample.

For f_cond and feedback functions, you can use the last_message and last_output utilities to access the last message and last output in the conversation, respectively.

Arguments

  • f_cond::Function: A function that accepts the aicall object and returns a boolean value. Retry will be attempted if the condition is not met (f_cond -> false).

  • aicall::AICallBlock: The aicall object to evaluate the condition on.

  • feedback::Union{AbstractString, Function}: Feedback to provide if the condition is not met. If a function is provided, it must accept the aicall object as the only argument and return a string.

  • evaluate_all::Bool=false: If true, it will evaluate all the "successful" samples in the aicall object. Otherwise, it will only evaluate the active sample.

  • feedback_expensive::Bool=false: If false, it will provide feedback to all samples that fail the condition. If feedback function is expensive to call (eg, another ai* function), set this to true and feedback will be provided only to the sample we will retry from.

Returns

  • a tuple (condition_passed, sample), where condition_passed is a boolean indicating whether the condition was met, and sample is the best sample to retry from.

Example

julia
# Mimic AIGenerate run!
 aicall = AIGenerate("Say hi!"; config = RetryConfig(; n_samples = 2))
@@ -245,11 +245,11 @@
 # Checks:
 cond == true
 node == sample
-node.wins == 1

With feedback:

julia
# Mimic AIGenerate run with feedback
aicall = AIGenerate(:BlankSystemUser; system = "a", user = "b")
sample = expand!(aicall.samples, aicall.conversation; success = true)
aicall.active_sample_id = sample.id

# Evaluate
cond, node = AT.evaluate_condition!(
    x -> occursin("NOTFOUND", last_output(x)), aicall, "Feedback X")
cond == false # fail
sample == node # same node (no other choice)
node.wins == 0
node.feedback == " Feedback X"

source


# PromptingTools.Experimental.AgentTools.expand!Method.

Expands the tree with a new node from parent using the given data and success.

source


# PromptingTools.Experimental.AgentTools.extract_configMethod.

Extracts config::RetryConfig from kwargs and returns the rest of the kwargs.

source


# PromptingTools.Experimental.AgentTools.find_nodeMethod.

Finds a node with a given id in the tree starting from node.

source


# PromptingTools.Experimental.AgentTools.gamma_sampleMethod.
julia
gamma_sample(α::Real, θ::Real)

Approximates a sample from the Gamma distribution using the Marsaglia and Tsang method.

source


# PromptingTools.Experimental.AgentTools.print_samplesMethod.

Pretty prints the samples tree starting from node. Usually, node is the root of the tree. Example: print_samples(aicall.samples).

source


# PromptingTools.Experimental.AgentTools.remove_used_kwargsMethod.

Removes the kwargs that have already been used in the conversation. Returns NamedTuple.

source


# PromptingTools.Experimental.AgentTools.reset_success!Function.

Sets the success field of all nodes in the tree to success value.

source


# PromptingTools.Experimental.AgentTools.run!Method.
julia
run!(codefixer::AICodeFixer; verbose::Int = 1, max_conversation_length::Int = 32000, run_kwargs...)

Executes the code fixing process encapsulated by the AICodeFixer instance. This method iteratively refines and fixes code by running the AI call in a loop for a specified number of rounds, using feedback from the code evaluation (aicodefixer_feedback) to improve the outcome in each iteration.

Arguments

  • codefixer::AICodeFixer: An instance of AICodeFixer containing the AI call, templates, and settings for the code fixing session.

  • verbose::Int=1: Verbosity level for logging. A higher value indicates more detailed logging.

  • max_conversation_length::Int=32000: Maximum length in characters for the conversation history to keep it within manageable limits, especially for large code fixing sessions.

  • num_rounds::Union{Nothing, Int}=nothing: Number of additional rounds for the code fixing session. If nothing, the value from the AICodeFixer instance is used.

  • run_kwargs...: Additional keyword arguments that are passed to the AI function.

Returns

  • AICodeFixer: The updated AICodeFixer instance with the results of the code fixing session.

Usage

julia
aicall = AICall(aigenerate, schema=mySchema, conversation=myConversation)
 codefixer = AICodeFixer(aicall, myTemplates; num_rounds=5)
-result = run!(codefixer, verbose=2)

Notes

  • The run! method drives the core logic of the AICodeFixer, iterating through rounds of AI interactions to refine and fix code.

  • In each round, it applies feedback based on the current state of the conversation, allowing the AI to respond more effectively.

  • The conversation history is managed to ensure it stays within the specified max_conversation_length, keeping the AI's focus on relevant parts of the conversation.

  • This iterative process is essential for complex code fixing tasks where multiple interactions and refinements are required to achieve the desired outcome.

source


# PromptingTools.Experimental.AgentTools.run!Method.
julia
run!(aicall::AICallBlock; verbose::Int = 1, catch_errors::Bool = false, return_all::Bool = true, kwargs...)

Executes the AI call wrapped by an AICallBlock instance. This method triggers the actual communication with the AI model and processes the response based on the provided conversation context and parameters.

Note: Currently return_all must always be set to true.

Arguments

  • aicall::AICallBlock: An instance of AICallBlock which encapsulates the AI function call along with its context and parameters (eg, AICall, AIGenerate)

  • verbose::Integer=1: A verbosity level for logging. A higher value indicates more detailed logging.

  • catch_errors::Union{Nothing, Bool}=nothing: A flag to indicate whether errors should be caught and saved to aicall.error. If nothing, it defaults to aicall.config.catch_errors.

  • return_all::Bool=true: A flag to indicate whether the whole conversation from the AI call should be returned. It should always be true.

  • kwargs...: Additional keyword arguments that are passed to the AI function.

Returns

  • AICallBlock: The same AICallBlock instance, updated with the results of the AI call. This includes updated conversation, success status, and potential error information.

Example

julia
aicall = AICall(aigenerate)
 run!(aicall)

Alternatively, you can trigger the run! call by using the AICall as a functor and calling it with a string or a UserMessage:

julia
aicall = AICall(aigenerate)
-aicall("Say hi!")

Notes

  • The run! method is a key component of the lazy evaluation model in AICall. It allows for the deferred execution of AI function calls, providing flexibility in how and when AI interactions are conducted.

  • The method updates the AICallBlock instance with the outcome of the AI call, including any generated responses, success or failure status, and error information if an error occurred.

  • This method is essential for scenarios where AI interactions are based on dynamic or evolving contexts, as it allows for real-time updates and responses based on the latest information.

source


# PromptingTools.Experimental.AgentTools.scoreMethod.

Scores a node using the ThompsonSampling method, similar to Bandit algorithms.

source


# PromptingTools.Experimental.AgentTools.scoreMethod.

Scores a node using the UCT (Upper Confidence Bound for Trees) method.

source


# PromptingTools.Experimental.AgentTools.select_bestFunction.
julia
select_best(node::SampleNode, scoring::AbstractScoringMethod = UCT();
+aicall("Say hi!")

Notes

  • The run! method is a key component of the lazy evaluation model in AICall. It allows for the deferred execution of AI function calls, providing flexibility in how and when AI interactions are conducted.

  • The method updates the AICallBlock instance with the outcome of the AI call, including any generated responses, success or failure status, and error information if an error occurred.

  • This method is essential for scenarios where AI interactions are based on dynamic or evolving contexts, as it allows for real-time updates and responses based on the latest information.

source


# PromptingTools.Experimental.AgentTools.scoreMethod.

Scores a node using the ThomsonSampling method, similar to Bandit algorithms.

source


# PromptingTools.Experimental.AgentTools.scoreMethod.

Scores a node using the UCT (Upper Confidence Bound for Trees) method.

source


# PromptingTools.Experimental.AgentTools.select_bestFunction.
julia
select_best(node::SampleNode, scoring::AbstractScoringMethod = UCT();
     ordering::Symbol = :PostOrderDFS)

Selects the best node from the tree using the given scoring (UCT or ThompsonSampling). Defaults to UCT. Thompson Sampling is more random with small samples, while UCT stabilizes much quicker thanks to looking at parent nodes as well.

Ordering can be either :PreOrderDFS or :PostOrderDFS. Defaults to :PostOrderDFS, which favors the leaves (end points of the tree).

Example

Compare the different scoring methods:

julia
# Set up mock samples and scores
 data = PT.AbstractMessage[]
 root = SampleNode(; data)
@@ -280,9 +280,9 @@
 ## SampleNode(id: 13184, stats: 2/3, score: 0.6, length: 0)
 ## ├─ SampleNode(id: 26078, stats: 2/2, score: 0.93, length: 0)
 ## │  └─ SampleNode(id: 29826, stats: 1/1, score: 0.22, length: 0)
-## └─ SampleNode(id: 39931, stats: 0/1, score: 0.84, length: 0)

source


# PromptingTools.Experimental.AgentTools.split_multi_samplesMethod.

If the conversation has multiple AIMessage samples, split them into separate conversations with the common past.

source


# PromptingTools.Experimental.AgentTools.truncate_conversationMethod.
julia
truncate_conversation(conversation::AbstractVector{<:PT.AbstractMessage};
-    max_conversation_length::Int = 32000)

Truncates a given conversation to at most max_conversation_length characters by removing messages "in the middle". It tries to retain the original system+user message and also the most recent messages.

Practically, if a conversation is too long, it will start by removing the most recent messages EXCEPT for the last two (assumed to be the last AIMessage with the code and the UserMessage with the feedback).

Arguments

max_conversation_length is in characters; assume c. 2-3 characters per LLM token, so 32000 should correspond to a 16K context window.
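A one-line sketch (assuming the AgentTools module is available as AT):

julia
shorter = AT.truncate_conversation(conversation; max_conversation_length = 16000)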

source


# PromptingTools.Experimental.AgentTools.unwrap_aicall_argsMethod.

Unwraps the arguments for AICall and returns the schema and conversation (if provided). Expands any provided AITemplate.

source


# PromptingTools.last_messageMethod.

Helpful accessor for AICall blocks. Returns the last message in the conversation.

source


# PromptingTools.last_outputMethod.

Helpful accessor for AICall blocks. Returns the last output in the conversation (eg, the string/data in the last message).

source


+ \ No newline at end of file diff --git a/previews/PR218/reference_apitools.html b/previews/PR218/reference_apitools.html index a4719cfb3..4802ae0fc 100644 --- a/previews/PR218/reference_apitools.html +++ b/previews/PR218/reference_apitools.html @@ -8,24 +8,24 @@ - + - + - +
Skip to content

Reference for APITools

# PromptingTools.Experimental.APITools.create_websearchMethod.
julia
create_websearch(query::AbstractString;
     api_key::AbstractString,
-    search_depth::AbstractString = "basic")

Arguments

  • query::AbstractString: The query to search for.

  • api_key::AbstractString: The API key to use for the search. Get an API key from Tavily.

  • search_depth::AbstractString: The depth of the search. Can be either "basic" or "advanced". Default is "basic". An advanced search counts as 2 requests.

  • include_answer::Bool: Whether to include the answer in the search results. Default is false.

  • include_raw_content::Bool: Whether to include the raw content in the search results. Default is false.

  • max_results::Integer: The maximum number of results to return. Default is 5.

  • include_images::Bool: Whether to include images in the search results. Default is false.

  • include_domains::AbstractVector{<:AbstractString}: A list of domains to include in the search results. Default is an empty list.

  • exclude_domains::AbstractVector{<:AbstractString}: A list of domains to exclude from the search results. Default is an empty list.

Example

julia
r = create_websearch("Who is King Charles?")

Even better, you can get not just the results but also the answer:

julia
r = create_websearch("Who is King Charles?"; include_answer = true)

See the Tavily REST API documentation for more information.

source


# PromptingTools.Experimental.APITools.tavily_apiMethod.
julia
tavily_api(;
+    search_depth::AbstractString = "basic")

Arguments

  • query::AbstractString: The query to search for.

  • api_key::AbstractString: The API key to use for the search. Get an API key from Tavily.

  • search_depth::AbstractString: The depth of the search. Can be either "basic" or "advanced". Default is "basic". Advanced search calls equal to 2 requests.

  • include_answer::Bool: Whether to include the answer in the search results. Default is false.

  • include_raw_content::Bool: Whether to include the raw content in the search results. Default is false.

  • max_results::Integer: The maximum number of results to return. Default is 5.

  • include_images::Bool: Whether to include images in the search results. Default is false.

  • include_domains::AbstractVector{<:AbstractString}: A list of domains to include in the search results. Default is an empty list.

  • exclude_domains::AbstractVector{<:AbstractString}: A list of domains to exclude from the search results. Default is an empty list.

Example

julia
r = create_websearch("Who is King Charles?")

Even better, you can get not just the results but also the answer:

julia
r = create_websearch("Who is King Charles?"; include_answer = true)

See the Tavily REST API documentation for more information.
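The remaining documented keyword arguments can be combined in the same way, eg, (illustrative values):

julia
r = create_websearch("Best practices for Julia multithreading";
    search_depth = "advanced",
    max_results = 3,
    include_domains = ["julialang.org", "discourse.julialang.org"])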

source


# PromptingTools.Experimental.APITools.tavily_apiMethod.
julia
tavily_api(;
     api_key::AbstractString,
     endpoint::String = "search",
     url::AbstractString = "https://api.tavily.com",
     http_kwargs::NamedTuple = NamedTuple(),
+    kwargs...)

Sends API requests to Tavily and returns the response.
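A rough sketch of a direct call (assuming extra keyword arguments are forwarded as the request payload; the API key is read from a placeholder environment variable):

julia
using PromptingTools.Experimental.APITools: tavily_api

resp = tavily_api(; api_key = ENV["TAVILY_API_KEY"],
    endpoint = "search",
    query = "Who is King Charles?",
    search_depth = "basic")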

source


\ No newline at end of file
diff --git a/previews/PR218/reference_experimental.html b/previews/PR218/reference_experimental.html
index 1daaf0838..2b0d7ae70 100644
--- a/previews/PR218/reference_experimental.html
+++ b/previews/PR218/reference_experimental.html
@@ -8,17 +8,17 @@

Reference for Experimental Module

Note: This module is experimental and may change in future releases. The intention is for the functionality to be moved to separate packages over time.

# PromptingTools.ExperimentalModule.
julia
Experimental

This module is for experimental code that is not yet ready for production. It is not included in the main module, so it must be explicitly imported.

Contains:

  • RAGTools: Retrieval-Augmented Generation (RAG) functionality.

  • AgentTools: Agentic functionality - lazy calls for building pipelines (eg, AIGenerate) and AICodeFixer.

  • APITools: APIs to complement GenAI workflows (eg, Tavily Search API).
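In practice, that means loading the submodules you need explicitly, eg:

julia
using PromptingTools
using PromptingTools.Experimental.RAGTools    # RAG functionality
using PromptingTools.Experimental.AgentTools  # lazy calls, AICodeFixer
using PromptingTools.Experimental.APITools    # eg, Tavily Search API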

source


\ No newline at end of file
diff --git a/previews/PR218/reference_ragtools.html b/previews/PR218/reference_ragtools.html
index fa6b68bab..cf5598eab 100644
--- a/previews/PR218/reference_ragtools.html
+++ b/previews/PR218/reference_ragtools.html
@@ -8,16 +8,16 @@

Reference for RAGTools

# PromptingTools.Experimental.RAGToolsModule.
julia
RAGTools

Provides Retrieval-Augmented Generation (RAG) functionality.

Requires: LinearAlgebra, SparseArrays, Unicode, PromptingTools for proper functionality.

This module is experimental and may change at any time. It is intended to be moved to a separate package in the future.

source


# PromptingTools.Experimental.RAGTools.AbstractCandidateChunksType.
julia
AbstractCandidateChunks

Abstract type for storing candidate chunks, ie, references to items in a AbstractChunkIndex.

Return type from find_closest and find_tags functions.

Required Fields

  • index_id::Symbol: the id of the index from which the candidates are drawn

  • positions::Vector{Int}: the positions of the candidates in the index

  • scores::Vector{Float32}: the similarity scores of the candidates from the query (higher is better)

source


# PromptingTools.Experimental.RAGTools.AbstractChunkIndexType.
julia
AbstractChunkIndex <: AbstractDocumentIndex

Main abstract type for storing document chunks and their embeddings. It also stores tags and sources for each chunk.

Required Fields

  • id::Symbol: unique identifier of each index (to ensure we're using the right index with CandidateChunks)

  • chunks::Vector{<:AbstractString}: underlying document chunks / snippets

  • embeddings::Union{Nothing, Matrix{<:Real}}: for semantic search

  • tags::Union{Nothing, AbstractMatrix{<:Bool}}: for exact search, filtering, etc. This is often a sparse matrix indicating which chunks have the given tag (see tag_vocab for the position lookup)

  • tags_vocab::Union{Nothing, Vector{<:AbstractString}}: vocabulary for the tags matrix (each column in tags is one item in tags_vocab and rows are the chunks)

  • sources::Vector{<:AbstractString}: sources of the chunks

  • extras::Union{Nothing, AbstractVector}: additional data, eg, metadata, source code, etc.

source


# PromptingTools.Experimental.RAGTools.AbstractGeneratorType.
julia
AbstractGenerator <: AbstractGenerationMethod

Abstract type for generating an answer with generate! (use to change the process / return type of generate).

Required Fields

  • contexter::AbstractContextBuilder: the context building method, dispatching build_context!

  • answerer::AbstractAnswerer: the answer generation method, dispatching answer!

  • refiner::AbstractRefiner: the answer refining method, dispatching refine!

  • postprocessor::AbstractPostprocessor: the postprocessing method, dispatching postprocess!

source


# PromptingTools.Experimental.RAGTools.AbstractIndexBuilderType.
julia
AbstractIndexBuilder

Abstract type for building an index with build_index (use to change the process / return type of build_index).

Required Fields

  • chunker::AbstractChunker: the chunking method, dispatching get_chunks

  • embedder::AbstractEmbedder: the embedding method, dispatching get_embeddings

  • tagger::AbstractTagger: the tagging method, dispatching get_tags

source


# PromptingTools.Experimental.RAGTools.AbstractMultiIndexType.
julia
AbstractMultiIndex <: AbstractDocumentIndex

Experimental abstract type for storing multiple document indexes. Not yet implemented.

source


# PromptingTools.Experimental.RAGTools.AbstractRetrieverType.
julia
AbstractRetriever <: AbstractRetrievalMethod

Abstract type for retrieving chunks from an index with retrieve (use to change the process / return type of retrieve).

Required Fields

  • rephraser::AbstractRephraser: the rephrasing method, dispatching rephrase

  • finder::AbstractSimilarityFinder: the similarity search method, dispatching find_closest

  • filter::AbstractTagFilter: the tag matching method, dispatching find_tags

  • reranker::AbstractReranker: the reranking method, dispatching rerank

source


# PromptingTools.Experimental.RAGTools.AdvancedGeneratorType.
julia
AdvancedGenerator <: AbstractGenerator

Default implementation for generate!. It simply enumerates context snippets and runs aigenerate (no refinement).

It uses ContextEnumerator, SimpleAnswerer, SimpleRefiner, and NoPostprocessor as default contexter, answerer, refiner, and postprocessor.

source


# PromptingTools.Experimental.RAGTools.AdvancedRetrieverType.
julia
AdvancedRetriever <: AbstractRetriever

Dispatch for retrieve with advanced retrieval methods to improve result quality. Compared to SimpleRetriever, it adds rephrasing the query and reranking the results.

Fields

  • rephraser::AbstractRephraser: the rephrasing method, dispatching rephrase - uses HyDERephraser

  • embedder::AbstractEmbedder: the embedding method, dispatching get_embeddings (see Preparation Stage for more details) - uses BatchEmbedder

  • processor::AbstractProcessor: the processor method, dispatching get_keywords (see Preparation Stage for more details) - uses NoProcessor

  • finder::AbstractSimilarityFinder: the similarity search method, dispatching find_closest - uses CosineSimilarity

  • tagger::AbstractTagger: the tag generating method, dispatching get_tags (see Preparation Stage for more details) - uses NoTagger

  • filter::AbstractTagFilter: the tag matching method, dispatching find_tags - uses NoTagFilter

  • reranker::AbstractReranker: the reranking method, dispatching rerank - uses CohereReranker
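A quick sketch of its use (assumes you already have an index built and, for the default reranker, a Cohere API key):

julia
using PromptingTools.Experimental.RAGTools

retriever = AdvancedRetriever()   # HyDE rephrasing + Cohere reranking by default
result = retrieve(retriever, index, "What are the best practices for parallel computing in Julia?")
result.context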

source


# PromptingTools.Experimental.RAGTools.AllTagFilterType.
julia
AllTagFilter <: AbstractTagFilter

Finds the chunks that have ALL OF the specified tag(s). A method for find_tags.

source


# PromptingTools.Experimental.RAGTools.AnnotatedNodeType.
julia
AnnotatedNode{T}  <: AbstractAnnotatedNode

A node to add annotations to the generated answer in airag.

Annotations can be: sources, scores, whether it's supported by the context or not, etc.

Fields

  • group_id::Int: Unique identifier for the same group of nodes (eg, different lines of the same code block)

  • parent::Union{AnnotatedNode, Nothing}: Parent node that current node was built on

  • children::Vector{AnnotatedNode}: Children nodes

  • `score::

source


# PromptingTools.Experimental.RAGTools.AnyTagFilterType.
julia
AnyTagFilter <: AbstractTagFilter

Finds the chunks that have ANY OF the specified tag(s). A method for find_tags.

source


# PromptingTools.Experimental.RAGTools.BM25SimilarityType.
julia
BM25Similarity <: AbstractSimilarityFinder

Finds the closest chunks to a query embedding by measuring the BM25 similarity between the query and the chunks' embeddings in binary form. A method for find_closest.

Reference: Wikipedia: BM25. Implementation follows: The Next Generation of Lucene Relevance.

source


# PromptingTools.Experimental.RAGTools.BatchEmbedderType.
julia
BatchEmbedder <: AbstractEmbedder

Default embedder for get_embeddings functions. It passes individual documents to be embedded in chunks to aiembed.

source


# PromptingTools.Experimental.RAGTools.BinaryBatchEmbedderType.
julia
BinaryBatchEmbedder <: AbstractEmbedder

Same as BatchEmbedder but reduces the embeddings matrix to a binary form (eg, BitMatrix). Defines a method for get_embeddings.

Reference: HuggingFace: Embedding Quantization.

source


# PromptingTools.Experimental.RAGTools.BinaryCosineSimilarityType.
julia
BinaryCosineSimilarity <: AbstractSimilarityFinder

Finds the closest chunks to a query embedding by measuring the Hamming distance AND cosine similarity between the query and the chunks' embeddings in binary form. A method for find_closest.

It follows the two-pass approach:

  • First pass: Hamming distance in binary form to get the top_k * rescore_multiplier (ie, more than top_k) candidates.

  • Second pass: Rescore the candidates with float embeddings and return the top_k.

Reference: HuggingFace: Embedding Quantization.
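A minimal sketch of wiring this in (assumes texts is a vector of strings and that the keyword constructors mirror the fields listed in this reference):

julia
using PromptingTools.Experimental.RAGTools

# Embed in binary form, then search with the matching two-pass finder
index = build_index(SimpleIndexer(; embedder = BinaryBatchEmbedder()), texts)
retriever = SimpleRetriever(; embedder = BinaryBatchEmbedder(), finder = BinaryCosineSimilarity())
result = retrieve(retriever, index, "What is embedding quantization?")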

source


# PromptingTools.Experimental.RAGTools.BitPackedBatchEmbedderType.
julia
BitPackedBatchEmbedder <: AbstractEmbedder

Same as BatchEmbedder but reduces the embeddings matrix to a binary form packed in UInt64 (eg, BitMatrix.chunks). Defines a method for get_embeddings.

See also utilities pack_bits and unpack_bits to move between packed/non-packed binary forms.

Reference: HuggingFace: Embedding Quantization.

source


# PromptingTools.Experimental.RAGTools.BitPackedCosineSimilarityType.
julia
BitPackedCosineSimilarity <: AbstractSimilarityFinder

Finds the closest chunks to a query embedding by measuring the Hamming distance AND cosine similarity between the query and the chunks' embeddings in binary form. A method for find_closest.

The difference to BinaryCosineSimilarity is that the binary values are packed into UInt64, which is more efficient.

Reference: HuggingFace: Embedding Quantization. Implementation of hamming_distance is based on TinyRAG.

source


# PromptingTools.Experimental.RAGTools.CandidateChunksType.
julia
CandidateChunks

A struct for storing references to chunks in a given index (identified by index_id): positions holds their locations in the index and scores holds the strength of similarity (=1 is the highest, most similar). It's the result of the retrieval stage of RAG.

Fields

  • index_id::Symbol: the id of the index from which the candidates are drawn

  • positions::Vector{Int}: the positions of the candidates in the index (ie, 5 refers to the 5th chunk in the index - chunks(index)[5])

  • scores::Vector{Float32}: the similarity scores of the candidates from the query (higher is better)

source


# PromptingTools.Experimental.RAGTools.ChunkEmbeddingsIndexType.
julia
ChunkEmbeddingsIndex

Main struct for storing document chunks and their embeddings. It also stores tags and sources for each chunk.

Previously, this struct was called ChunkIndex.

Fields

  • id::Symbol: unique identifier of each index (to ensure we're using the right index with CandidateChunks)

  • chunks::Vector{<:AbstractString}: underlying document chunks / snippets

  • embeddings::Union{Nothing, Matrix{<:Real}}: for semantic search

  • tags::Union{Nothing, AbstractMatrix{<:Bool}}: for exact search, filtering, etc. This is often a sparse matrix indicating which chunks have the given tag (see tag_vocab for the position lookup)

  • tags_vocab::Union{Nothing, Vector{<:AbstractString}}: vocabulary for the tags matrix (each column in tags is one item in tags_vocab and rows are the chunks)

  • sources::Vector{<:AbstractString}: sources of the chunks

  • extras::Union{Nothing, AbstractVector}: additional data, eg, metadata, source code, etc.

source


# PromptingTools.Experimental.RAGTools.ChunkKeywordsIndexType.
julia
ChunkKeywordsIndex

Struct for storing chunks of text and associated keywords for BM25 similarity search.

Fields

  • id::Symbol: unique identifier of each index (to ensure we're using the right index with CandidateChunks)

  • chunks::Vector{<:AbstractString}: underlying document chunks / snippets

  • chunkdata::Union{Nothing, AbstractMatrix{<:Real}}: for similarity search, assumed to be DocumentTermMatrix

  • tags::Union{Nothing, AbstractMatrix{<:Bool}}: for exact search, filtering, etc. This is often a sparse matrix indicating which chunks have the given tag (see tag_vocab for the position lookup)

  • tags_vocab::Union{Nothing, Vector{<:AbstractString}}: vocabulary for the tags matrix (each column in tags is one item in tags_vocab and rows are the chunks)

  • sources::Vector{<:AbstractString}: sources of the chunks

  • extras::Union{Nothing, AbstractVector}: additional data, eg, metadata, source code, etc.

Example

We can easily create a keywords-based index from a standard embeddings-based index.

julia

 # Let's assume we have a standard embeddings-based index
 index = build_index(SimpleIndexer(), texts; chunker_kwargs = (; max_length=10))
 
@@ -33,7 +33,7 @@
 result = retrieve(retriever, index_keywords, "What are the best practices for parallel computing in Julia?")
 result.context

If you want to use airag, don't forget to specify the config to make sure keywords are processed (ie, tokenized) and that BM25 is used for searching candidates

julia
cfg = RAGConfig(; retriever = SimpleBM25Retriever());
 airag(cfg, index_keywords;
+    question = "What are the best practices for parallel computing in Julia?")

source


# PromptingTools.Experimental.RAGTools.ChunkKeywordsIndexMethod.
julia
ChunkKeywordsIndex(
     [processor::AbstractProcessor=KeywordsProcessor(),] index::ChunkEmbeddingsIndex; verbose::Int = 1,
     index_id = gensym("ChunkKeywordsIndex"), processor_kwargs...)

Convenience method to quickly create a ChunkKeywordsIndex from an existing ChunkEmbeddingsIndex.

Example

julia

 # Let's assume we have a standard embeddings-based index
@@ -43,7 +43,7 @@
 index_keywords = ChunkKeywordsIndex(index)
 
 # We can immediately create a MultiIndex (a hybrid index holding both indices)
+multi_index = MultiIndex([index, index_keywords])

source


# PromptingTools.Experimental.RAGTools.CohereRerankerType.
julia
CohereReranker <: AbstractReranker

Rerank strategy using the Cohere Rerank API. Requires an API key. A method for rerank.

source


# PromptingTools.Experimental.RAGTools.ContextEnumeratorType.
julia
ContextEnumerator <: AbstractContextBuilder

Default method for build_context!. It simply enumerates the context snippets around each position in candidates. When possible, it will add surrounding chunks (from the same source).

source


# PromptingTools.Experimental.RAGTools.CosineSimilarityType.
julia
CosineSimilarity <: AbstractSimilarityFinder

Finds the closest chunks to a query embedding by measuring the cosine similarity between the query and the chunks' embeddings. A method for find_closest (see the docstring for more details and usage example).

source


# PromptingTools.Experimental.RAGTools.DocumentTermMatrixType.
julia
DocumentTermMatrix{T<:AbstractString}

A sparse matrix of term frequencies and document lengths to allow calculation of BM25 similarity scores.

source


# PromptingTools.Experimental.RAGTools.FileChunkerType.
julia
FileChunker <: AbstractChunker

Chunker when you provide file paths to get_chunks functions.

Ie, the inputs will be validated first (eg, file exists, etc) and then read into memory.

Set as default chunker in get_chunks functions.

source


# PromptingTools.Experimental.RAGTools.FlashRankerType.
julia
FlashRanker <: AbstractReranker

Rerank strategy using the package FlashRank.jl and local models. A method for rerank.

You must first import the FlashRank.jl package. To automatically download any required models, set your ENV["DATADEPS_ALWAYS_ACCEPT"] = true (see DataDeps for more details).

Example

julia
using FlashRank
 
 # Wrap the model to be a valid Ranker recognized by RAGTools
 # It will be provided to the airag/rerank function to avoid instantiating it on every call
@@ -55,7 +55,7 @@
 
 # Ask a question (assumes you have some `index`)
 question = "What are the best practices for parallel computing in Julia?"
+result = airag(cfg, index; question, return_all = true)

source


# PromptingTools.Experimental.RAGTools.HTMLStylerType.
julia
HTMLStyler

Defines styling via classes (attribute class) and styles (attribute style) for HTML formatting of AbstractAnnotatedNode

source


# PromptingTools.Experimental.RAGTools.HyDERephraserType.
julia
HyDERephraser <: AbstractRephraser

Rephraser implemented using the provided AI Template (eg, ...) and standard chat model. A method for rephrase.

It uses a prompt-based rephrasing method called HyDE (Hypothetical Document Embedding), where instead of looking for an embedding of the question, we look for the documents most similar to a synthetic passage that would be a good answer to our question.

Reference: Arxiv paper.
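A small sketch of opting into HyDE with the simple retriever (keyword constructor assumed to mirror the SimpleRetriever fields listed later on this page):

julia
using PromptingTools.Experimental.RAGTools

retriever = SimpleRetriever(; rephraser = HyDERephraser())
result = retrieve(retriever, index, "How do I parallelize a loop in Julia?")
result.rephrased_questions   # the hypothetical passage(s) used for the search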

source


# PromptingTools.Experimental.RAGTools.JudgeAllScoresType.

final_rating is the average of all scoring criteria. Explain the final_rating in rationale

source


# PromptingTools.Experimental.RAGTools.JudgeRatingType.

Provide the final_rating between 1-5. Provide the rationale for it.

source


# PromptingTools.Experimental.RAGTools.KeywordsIndexerType.
julia
KeywordsIndexer <: AbstractIndexBuilder

Keyword-based index (BM25) to be returned by build_index.

It uses TextChunker, KeywordsProcessor, and NoTagger as default chunker, processor, and tagger.

source


# PromptingTools.Experimental.RAGTools.KeywordsProcessorType.
julia
KeywordsProcessor <: AbstractProcessor

Default keywords processor for get_keywords functions. It normalizes the documents, tokenizes them and builds a DocumentTermMatrix.

source


# PromptingTools.Experimental.RAGTools.MultiCandidateChunksType.
julia
MultiCandidateChunks

A struct for storing references to multiple sets of chunks across different indices. Each set of chunks is identified by an index_id in index_ids, with corresponding positions in the index and scores indicating the strength of similarity.

This struct is useful for scenarios where candidates are drawn from multiple indices, and there is a need to keep track of which candidates came from which index.

Fields

  • index_ids::Vector{Symbol}: the ids of the indices from which the candidates are drawn

  • positions::Vector{TP}: the positions of the candidates in their respective indices

  • scores::Vector{TD}: the similarity scores of the candidates from the query

source


# PromptingTools.Experimental.RAGTools.MultiFinderType.
julia
MultiFinder <: AbstractSimilarityFinder

Composite finder for MultiIndex where we want to set a separate finder for each index. A method for find_closest. Positions correspond to indexes(::MultiIndex).

source


# PromptingTools.Experimental.RAGTools.MultiIndexType.
julia
MultiIndex

Composite index that stores multiple ChunkIndex objects and their embeddings.

Fields

  • id::Symbol: unique identifier of each index (to ensure we're using the right index with CandidateChunks)

  • indexes::Vector{<:AbstractChunkIndex}: the indexes to be combined

Use the accessor indexes to access the individual indexes.

Examples

We can create a MultiIndex from a vector of AbstractChunkIndex objects.

julia
index = build_index(SimpleIndexer(), texts; chunker_kwargs = (; sources))
 index_keywords = ChunkKeywordsIndex(index) # same chunks as above but adds BM25 instead of embeddings
 
 multi_index = MultiIndex([index, index_keywords])

To use airag with different types of indices, we need to specify how to find the closest items for each index

julia
# Cosine similarity for embeddings and BM25 for keywords, same order as indexes in MultiIndex
@@ -66,7 +66,7 @@
 
 # Ask questions
 msg = airag(cfg, multi_index; question = "What are the best practices for parallel computing in Julia?")
+pprint(msg) # prettify the answer

source


# PromptingTools.Experimental.RAGTools.NoEmbedderType.
julia
NoEmbedder <: AbstractEmbedder

No-op embedder for get_embeddings functions. It returns nothing.

source


# PromptingTools.Experimental.RAGTools.NoPostprocessorType.
julia
NoPostprocessor <: AbstractPostprocessor

Default method for postprocess! method. A passthrough option that returns the result without any changes.

Overload this method to add custom postprocessing steps, eg, logging, saving conversations to disk, etc.

source


# PromptingTools.Experimental.RAGTools.NoProcessorType.
julia
NoProcessor <: AbstractProcessor

No-op processor for get_keywords functions. It returns the inputs as is.

source


# PromptingTools.Experimental.RAGTools.NoRefinerType.
julia
NoRefiner <: AbstractRefiner

Default method for refine! method. A passthrough option that returns the result.answer without any changes.

source


# PromptingTools.Experimental.RAGTools.NoRephraserType.
julia
NoRephraser <: AbstractRephraser

No-op implementation for rephrase, which simply passes the question through.

source


# PromptingTools.Experimental.RAGTools.NoRerankerType.
julia
NoReranker <: AbstractReranker

No-op implementation for rerank, which simply passes the candidate chunks through.

source


# PromptingTools.Experimental.RAGTools.NoTagFilterType.
julia
NoTagFilter <: AbstractTagFilter

No-op implementation for find_tags, which simply returns all chunks.

source


# PromptingTools.Experimental.RAGTools.NoTaggerType.
julia
NoTagger <: AbstractTagger

No-op tagger for get_tags functions. It returns (nothing, nothing).

source


# PromptingTools.Experimental.RAGTools.OpenTaggerType.
julia
OpenTagger <: AbstractTagger

Tagger for get_tags functions, which generates possible tags for each chunk via aiextract. You can customize it via prompt template (default: :RAGExtractMetadataShort), but it's quite open-ended (ie, AI decides the possible tags).

source


# PromptingTools.Experimental.RAGTools.PassthroughTaggerType.
julia
PassthroughTagger <: AbstractTagger

Tagger for get_tags functions, which passes tags directly as Vector of Vectors of strings (ie, tags[i] is the tags for docs[i]).

source


# PromptingTools.Experimental.RAGTools.RAGConfigType.
julia
RAGConfig <: AbstractRAGConfig

Default configuration for RAG. It uses SimpleIndexer, SimpleRetriever, and SimpleGenerator as default components. Provided as the first argument in airag.

To customize the components, replace corresponding fields for each step of the RAG pipeline (eg, use subtypes(AbstractIndexBuilder) to find the available options).
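For example, swapping the retriever while keeping the other defaults (mirrors the BM25 example earlier on this page; index is assumed to exist):

julia
using PromptingTools.Experimental.RAGTools

cfg = RAGConfig(; retriever = AdvancedRetriever())
msg = airag(cfg, index; question = "What are the best practices for parallel computing in Julia?")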

source


# PromptingTools.Experimental.RAGTools.RAGResultType.
julia
RAGResult

A struct for debugging RAG answers. It contains the question, answer, context, and the candidate chunks at each step of the RAG pipeline.

Think of the flow as question -> rephrased_questions -> answer -> final_answer with the context and candidate chunks helping along the way.

Fields

  • question::AbstractString: the original question

  • rephrased_questions::Vector{<:AbstractString}: a vector of rephrased questions (eg, HyDe, Multihop, etc.)

  • answer::AbstractString: the generated answer

  • final_answer::AbstractString: the refined final answer (eg, after CorrectiveRAG), also considered the FINAL answer (it must always be available)

  • context::Vector{<:AbstractString}: the context used for retrieval (ie, the vector of chunks and their surrounding window if applicable)

  • sources::Vector{<:AbstractString}: the sources of the context (for the original matched chunks)

  • emb_candidates::CandidateChunks: the candidate chunks from the embedding index (from find_closest)

  • tag_candidates::Union{Nothing, CandidateChunks}: the candidate chunks from the tag index (from find_tags)

  • filtered_candidates::CandidateChunks: the filtered candidate chunks (intersection of emb_candidates and tag_candidates)

  • reranked_candidates::CandidateChunks: the reranked candidate chunks (from rerank)

  • conversations::Dict{Symbol,Vector{<:AbstractMessage}}: the conversation history for AI steps of the RAG pipeline, use keys that correspond to the function names, eg, :answer or :refine

See also: pprint (pretty printing), annotate_support (for annotating the answer)
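To obtain the full RAGResult rather than just the final message, pass return_all = true, as in the airag example above (cfg, index, and question are assumed to exist):

julia
result = airag(cfg, index; question, return_all = true)   # returns a RAGResult
result.final_answer            # the refined answer
result.sources                 # sources of the matched chunks
pprint(result)                 # pretty-print with support annotations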

source


# PromptingTools.Experimental.RAGTools.RankGPTRerankerType.
julia
RankGPTReranker <: AbstractReranker

Rerank strategy using the RankGPT algorithm (calling LLMs). A method for rerank.

Reference

[1] Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agents by W. Sun et al. [2] RankGPT Github

source


# PromptingTools.Experimental.RAGTools.RankGPTResultType.
julia
RankGPTResult

Results from the RankGPT algorithm.

Fields

  • question::String: The question that was asked.

  • chunks::AbstractVector{T}: The chunks that were ranked (=context).

  • positions::Vector{Int}: The ranking of the chunks (referring to the chunks).

  • elapsed::Float64: The time it took to rank the chunks.

  • cost::Float64: The cumulative cost of the ranking.

  • tokens::Int: The cumulative number of tokens used in the ranking.

source


# PromptingTools.Experimental.RAGTools.SimpleAnswererType.
julia
SimpleAnswerer <: AbstractAnswerer

Default method for answer! method. Generates an answer using the aigenerate function with the provided context and question.

source


# PromptingTools.Experimental.RAGTools.SimpleBM25RetrieverType.
julia
SimpleBM25Retriever <: AbstractRetriever

Keyword-based implementation for retrieve. It does a simple similarity search via BM25Similarity and returns the results.

Make sure to use consistent processor and tagger with the Preparation Stage (build_index)!

Fields

  • rephraser::AbstractRephraser: the rephrasing method, dispatching rephrase - uses NoRephraser

  • embedder::AbstractEmbedder: the embedding method, dispatching get_embeddings (see Preparation Stage for more details) - uses NoEmbedder

  • processor::AbstractProcessor: the processor method, dispatching get_keywords (see Preparation Stage for more details) - uses KeywordsProcessor

  • finder::AbstractSimilarityFinder: the similarity search method, dispatching find_closest - uses CosineSimilarity

  • tagger::AbstractTagger: the tag generating method, dispatching get_tags (see Preparation Stage for more details) - uses NoTagger

  • filter::AbstractTagFilter: the tag matching method, dispatching find_tags - uses NoTagFilter

  • reranker::AbstractReranker: the reranking method, dispatching rerank - uses NoReranker
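A short sketch tying it to a keywords-based index (texts is an assumed vector of strings; KeywordsIndexer is described earlier on this page):

julia
using PromptingTools.Experimental.RAGTools

index_keywords = build_index(KeywordsIndexer(), texts; chunker_kwargs = (; max_length = 10))
result = retrieve(SimpleBM25Retriever(), index_keywords, "What are the best practices for parallel computing in Julia?")
result.context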

source


# PromptingTools.Experimental.RAGTools.SimpleGeneratorType.
julia
SimpleGenerator <: AbstractGenerator

Default implementation for generate. It simply enumerates context snippets and runs aigenerate (no refinement).

It uses ContextEnumerator, SimpleAnswerer, NoRefiner, and NoPostprocessor as default contexter, answerer, refiner, and postprocessor.

source


# PromptingTools.Experimental.RAGTools.SimpleIndexerType.
julia
SimpleIndexer <: AbstractIndexBuilder

Default implementation for build_index.

It uses TextChunker, BatchEmbedder, and NoTagger as default chunker, embedder, and tagger.

source


# PromptingTools.Experimental.RAGTools.SimpleRefinerType.
julia
SimpleRefiner <: AbstractRefiner

Refines the answer using the same context previously provided via the provided prompt template. A method for refine!.

source


# PromptingTools.Experimental.RAGTools.SimpleRephraserType.
julia
SimpleRephraser <: AbstractRephraser

Rephraser implemented using the provided AI Template (eg, ...) and standard chat model. A method for rephrase.

source


# PromptingTools.Experimental.RAGTools.SimpleRetrieverType.
julia
SimpleRetriever <: AbstractRetriever

Default implementation for retrieve function. It does a simple similarity search via CosineSimilarity and returns the results.

Make sure to use consistent embedder and tagger with the Preparation Stage (build_index)!

Fields

  • rephraser::AbstractRephraser: the rephrasing method, dispatching rephrase - uses NoRephraser

  • embedder::AbstractEmbedder: the embedding method, dispatching get_embeddings (see Preparation Stage for more details) - uses BatchEmbedder

  • processor::AbstractProcessor: the processor method, dispatching get_keywords (see Preparation Stage for more details) - uses NoProcessor

  • finder::AbstractSimilarityFinder: the similarity search method, dispatching find_closest - uses CosineSimilarity

  • tagger::AbstractTagger: the tag generating method, dispatching get_tags (see Preparation Stage for more details) - uses NoTagger

  • filter::AbstractTagFilter: the tag matching method, dispatching find_tags - uses NoTagFilter

  • reranker::AbstractReranker: the reranking method, dispatching rerank - uses NoReranker

source


# PromptingTools.Experimental.RAGTools.StylerType.
julia
Styler

Defines styling keywords for printstyled for each AbstractAnnotatedNode

source


# PromptingTools.Experimental.RAGTools.SubChunkIndexType.
julia
SubChunkIndex

A view of the parent index with respect to the chunks (and chunk-aligned fields). All methods and accessors working for AbstractChunkIndex also work for SubChunkIndex. It does not yet work for MultiIndex.

Fields

  • parent::AbstractChunkIndex: the parent index from which the chunks are drawn (always the original index, never a view)

  • positions::Vector{Int}: the positions of the chunks in the parent index (always refers to original PARENT index, even if we create a view of the view)

Example

julia
cc = CandidateChunks(index.id, 1:10)
 sub_index = @view(index[cc])

You can use SubChunkIndex to access chunks or sources (and other fields) from a parent index, eg,

julia
RT.chunks(sub_index)
 RT.sources(sub_index)
 RT.chunkdata(sub_index) # slice of embeddings
 RT.tags(sub_index) # slice of tags
 RT.tags_vocab(sub_index) # unchanged, identical to parent version
 RT.extras(sub_index) # slice of extras

Access the parent index that the positions correspond to

julia
parent(sub_index)
RT.positions(sub_index)

source


# PromptingTools.Experimental.RAGTools.SubDocumentTermMatrixType.

A partial view of a DocumentTermMatrix, tf is MATERIALIZED for performance and fewer allocations.

source


# PromptingTools.Experimental.RAGTools.TavilySearchRefinerType.
julia
TavilySearchRefiner <: AbstractRefiner

Refines the answer by executing a web search using the Tavily API. This method aims to enhance the answer's accuracy and relevance by incorporating information retrieved from the web. A method for refine!.

source


# PromptingTools.Experimental.RAGTools.TextChunkerType.
julia
TextChunker <: AbstractChunker

Chunker when you provide text to get_chunks functions. Inputs are directly chunked

source


# PromptingTools.Experimental.RAGTools.TrigramAnnotaterType.
julia
TrigramAnnotater

Annotation method where we score answer versus each context based on word-level trigrams that match.

It's a very simple method (and it can lose some semantic meaning in longer sequences, eg, negations), but it works reasonably well for both text and code.

source


# PromptingTools.Experimental.RAGTools._normalizeFunction.

Shortcut to LinearAlgebra.normalize. Provided in the package extension RAGToolsExperimentalExt (Requires SparseArrays, Unicode, and LinearAlgebra)

source


# PromptingTools.Experimental.RAGTools.add_node_metadata!Method.
julia
add_node_metadata!(annotater::TrigramAnnotater,
     root::AnnotatedNode; add_sources::Bool = true, add_scores::Bool = true,
    sources::Union{Nothing, AbstractVector{<:AbstractString}} = nothing)

Adds metadata to the children of root. Metadata includes sources and scores, if requested.

Optionally, it can add a list of sources at the end of the printed text.

The metadata is added by inserting new nodes into the root's children list (each with no children of its own to be printed out).

source


# PromptingTools.Experimental.RAGTools.airagMethod.
julia
airag(cfg::AbstractRAGConfig, index::AbstractDocumentIndex;
     question::AbstractString,
     verbose::Integer = 1, return_all::Bool = false,
     api_kwargs::NamedTuple = NamedTuple(),
 result = airag(cfg, multi_index; question, return_all=true)
 
 # Pretty-print the result
PT.pprint(result)

For easier manipulation of nested kwargs, see utilities getpropertynested, setpropertynested, merge_kwargs_nested.

source


# PromptingTools.Experimental.RAGTools.align_node_styles!Method.
julia
align_node_styles!(annotater::TrigramAnnotater, nodes::AbstractVector{<:AnnotatedNode}; kwargs...)

Aligns the styles of the nodes based on the surrounding nodes ("fill-in-the-middle").

If the node has no score, but the surrounding nodes have the same style, the node will inherit the style of the surrounding nodes.

source


# PromptingTools.Experimental.RAGTools.annotate_supportMethod.
julia
annotate_support(annotater::TrigramAnnotater, answer::AbstractString,
     context::AbstractVector; min_score::Float64 = 0.5,
     skip_trigrams::Bool = true, hashed::Bool = true,
     sources::Union{Nothing, AbstractVector{<:AbstractString}} = nothing,
 answer = "This is a test context. Another context sentence."
 
 annotated_root = annotate_support(annotater, answer, context)
pprint(annotated_root) # pretty print the annotated tree

source


# PromptingTools.Experimental.RAGTools.annotate_supportMethod.
julia
annotate_support(
     annotater::TrigramAnnotater, result::AbstractRAGResult; min_score::Float64 = 0.5,
     skip_trigrams::Bool = true, hashed::Bool = true,
     min_source_score::Float64 = 0.25,
     add_scores::Bool = true, kwargs...)

Dispatch for annotate_support for AbstractRAGResult type. It extracts the final_answer and context from the result and calls annotate_support with them.

See annotate_support for more details.

Example

julia
res = RAGResult(; question = "", final_answer = "This is a test.",
     context = ["Test context.", "Completely different"])
 annotated_root = annotate_support(annotater, res)
PT.pprint(annotated_root)

source


# PromptingTools.Experimental.RAGTools.answer!Method.
julia
answer!(
     answerer::SimpleAnswerer, index::AbstractDocumentIndex, result::AbstractRAGResult;
     model::AbstractString = PT.MODEL_CHAT, verbose::Bool = true,
     template::Symbol = :RAGAnswerFromContext,
     cost_tracker = Threads.Atomic{Float64}(0.0),
    kwargs...)

Generates an answer using the aigenerate function with the provided result.context and result.question.

Returns

  • Mutated result with result.answer and the full conversation saved in result.conversations[:answer]

Arguments

  • answerer::SimpleAnswerer: The method to use for generating the answer. Uses aigenerate.

  • index::AbstractDocumentIndex: The index containing chunks and sources.

  • result::AbstractRAGResult: The result containing the context and question to generate the answer for.

  • model::AbstractString: The model to use for generating the answer. Defaults to PT.MODEL_CHAT.

  • verbose::Bool: If true, enables verbose logging.

  • template::Symbol: The template to use for the aigenerate function. Defaults to :RAGAnswerFromContext.

  • cost_tracker: An atomic counter to track the cost of the operation.
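
Example

A minimal sketch (assuming `index` and a `result` from the retrieval stage already exist; the keyword values shown are the documented defaults):

julia
result = answer!(SimpleAnswerer(), index, result; model = PT.MODEL_CHAT, template = :RAGAnswerFromContext)
result.answer  # the generated answer, also saved in result.conversations[:answer]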

source


# PromptingTools.Experimental.RAGTools.build_contextMethod.
julia
build_context(contexter::ContextEnumerator,
     index::AbstractDocumentIndex, candidates::AbstractCandidateChunks;
     verbose::Bool = true,
     chunks_window_margin::Tuple{Int, Int} = (1, 1), kwargs...)
     build_context!(contexter::ContextEnumerator,
     index::AbstractDocumentIndex, result::AbstractRAGResult; kwargs...)

Build context strings for each position in candidates considering a window margin around each position. If mutating version is used (build_context!), it will use result.reranked_candidates to update the result.context field.

Arguments

  • contexter::ContextEnumerator: The method to use for building the context. Enumerates the snippets.

  • index::AbstractDocumentIndex: The index containing chunks and sources.

  • candidates::AbstractCandidateChunks: Candidate chunks which contain positions to extract context from.

  • verbose::Bool: If true, enables verbose logging.

  • chunks_window_margin::Tuple{Int, Int}: A tuple indicating the margin (before, after) around each position to include in the context. Defaults to (1,1), which means 1 preceding and 1 succeeding chunk will be included. With (0,0), only the matching chunks will be included.

Returns

  • Vector{String}: A vector of context strings, each corresponding to a position in reranked_candidates.

Examples

julia
index = ChunkIndex(...)  # Assuming a proper index is defined
 candidates = CandidateChunks(index.id, [2, 4], [0.1, 0.2])
context = build_context(ContextEnumerator(), index, candidates; chunks_window_margin=(0, 1)) # include only one following chunk for each matching chunk

source


# PromptingTools.Experimental.RAGTools.build_indexMethod.
julia
build_index(
     indexer::KeywordsIndexer, files_or_docs::Vector{<:AbstractString};
     verbose::Integer = 1,
     extras::Union{Nothing, AbstractVector} = nothing,
     tagger::AbstractTagger = indexer.tagger,
     tagger_kwargs::NamedTuple = NamedTuple(),
     api_kwargs::NamedTuple = NamedTuple(),
    cost_tracker = Threads.Atomic{Float64}(0.0))

Builds a ChunkKeywordsIndex from the provided files or documents to support keyword-based search (BM25).
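
Example

A hedged sketch (file names are hypothetical; it relies on the indexer's default chunker reading files from disk, analogous to the SimpleIndexer example below):

julia
files = ["doc1.txt", "doc2.txt"]            # hypothetical file paths
index = build_index(KeywordsIndexer(), files; verbose = 1)
# `index` is a ChunkKeywordsIndex; pair it with SimpleBM25Retriever / BM25Similarity for keyword retrieval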

source


# PromptingTools.Experimental.RAGTools.build_indexMethod.
julia
build_index(
     indexer::AbstractIndexBuilder, files_or_docs::Vector{<:AbstractString};
     verbose::Integer = 1,
     extras::Union{Nothing, AbstractVector} = nothing,
 # Assuming `test_files` is a vector of file paths
 indexer = SimpleIndexer(chunker=FileChunker(), tagger=OpenTagger())
 index = build_index(indexer, test_files; 
        chunker_kwargs=(; separators=[". "]), verbose=true)

Notes

  • If you get errors about exceeding embedding input sizes, first check the max_length in your chunks. If that does NOT resolve the issue, try changing the embedding_kwargs. In particular, reducing the target_batch_size_length parameter (eg, 10_000) and number of tasks ntasks=1. Some providers cannot handle large batch sizes (eg, Databricks).

source


# PromptingTools.Experimental.RAGTools.build_qa_evalsMethod.
julia
build_qa_evals(doc_chunks::Vector{<:AbstractString}, sources::Vector{<:AbstractString};
                model=PT.MODEL_CHAT, instructions="None.", qa_template::Symbol=:RAGCreateQAFromContext, 
                verbose::Bool=true, api_kwargs::NamedTuple = NamedTuple(), kwargs...) -> Vector{QAEvalItem}

Create a collection of question and answer evaluations (QAEvalItem) from document chunks and sources. This function generates Q&A pairs based on the provided document chunks, using a specified AI model and template.

Arguments

  • doc_chunks::Vector{<:AbstractString}: A vector of document chunks, each representing a segment of text.

  • sources::Vector{<:AbstractString}: A vector of source identifiers corresponding to each chunk in doc_chunks (eg, filenames or paths).

  • model: The AI model used for generating Q&A pairs. Default is PT.MODEL_CHAT.

  • instructions::String: Additional instructions or context to provide to the model generating QA sets. Defaults to "None.".

  • qa_template::Symbol: A template symbol that dictates the AITemplate that will be used. It must have placeholder context. Default is :RAGCreateQAFromContext.

  • api_kwargs::NamedTuple: Parameters that will be forwarded to the API endpoint.

  • verbose::Bool: If true, additional information like costs will be logged. Defaults to true.

Returns

Vector{QAEvalItem}: A vector of QAEvalItem structs, each containing a source, context, question, and answer. Invalid or empty items are filtered out.

Notes

  • The function internally uses aiextract to generate Q&A pairs based on the provided qa_template. So you can use any kwargs that you want.

  • Each QAEvalItem includes the context (document chunk), the generated question and answer, and the source.

  • The function tracks and reports the cost of AI calls if verbose is enabled.

  • Items where the question, answer, or context is empty are considered invalid and are filtered out.

Examples

Creating Q&A evaluations from a set of document chunks:

julia
doc_chunks = ["Text from document 1", "Text from document 2"]
 sources = ["source1", "source2"]
qa_evals = build_qa_evals(doc_chunks, sources)

source


# PromptingTools.Experimental.RAGTools.build_tagsFunction.

Builds a matrix of tags and a vocabulary list. REQUIRES SparseArrays, LinearAlgebra, Unicode packages to be loaded!!

source


# PromptingTools.Experimental.RAGTools.build_tagsMethod.
julia
build_tags(tagger::AbstractTagger, chunk_tags::Nothing; kwargs...)

No-op that skips any tag building, returning (nothing, nothing).

Otherwise, it would build the sparse matrix and the vocabulary (requires SparseArrays and LinearAlgebra packages to be loaded).

source


# PromptingTools.Experimental.RAGTools.chunkdataMethod.

Access chunkdata for a subset of chunks, chunk_idx is a vector of chunk indices in the index

source


# PromptingTools.Experimental.RAGTools.chunkdataMethod.

Access chunkdata for a subset of chunks, chunk_idx is a vector of chunk indices in the index

source


# PromptingTools.Experimental.RAGTools.chunkdataMethod.

Access chunkdata for a subset of chunks, chunk_idx is a vector of chunk indices in the index

source


# PromptingTools.Experimental.RAGTools.cohere_apiMethod.
julia
cohere_api(;
 api_key::AbstractString,
 endpoint::String,
 url::AbstractString="https://api.cohere.ai/v1",
 http_kwargs::NamedTuple=NamedTuple(),
kwargs...)

Lightweight wrapper around the Cohere API. See https://cohere.com/docs for more details.

Arguments

  • api_key: Your Cohere API key. You can get one from https://dashboard.cohere.com/welcome/register (trial access is free).

  • endpoint: The Cohere endpoint to call.

  • url: The base URL for the Cohere API. Default is https://api.cohere.ai/v1.

  • http_kwargs: Any additional keyword arguments to pass to HTTP.post.

  • kwargs: Any additional keyword arguments to pass to the Cohere API.

source


# PromptingTools.Experimental.RAGTools.create_permutation_instructionMethod.
julia
create_permutation_instruction(
     context::AbstractVector{<:AbstractString}; rank_start::Integer = 1,
    rank_end::Integer = 100, max_length::Integer = 512, template::Symbol = :RAGRankGPT)

Creates rendered template with injected context passages.

source


# PromptingTools.Experimental.RAGTools.extract_rankingMethod.
julia
extract_ranking(str::AbstractString)

Extracts the ranking from the response into a sorted array of integers.

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(
    finder::BinaryCosineSimilarity, emb::AbstractMatrix{<:Bool},
    query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];
    top_k::Int = 100, rescore_multiplier::Int = 4, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by embeddings in emb) that are closest to query embedding (query_emb) using binary embeddings (in the index).

This is a two-pass approach:

  • First pass: Hamming distance in binary form to get the top_k * rescore_multiplier (ie, more than top_k) candidates.

  • Second pass: Rescore the candidates with float embeddings and return the top_k.

Returns only top_k closest indices.

Reference: HuggingFace: Embedding Quantization.

Examples

Convert any Float embeddings to binary like this:

julia
binary_emb = map(>(0), emb)

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(
     finder::AbstractSimilarityFinder, index::AbstractChunkIndex,
     query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];
    top_k::Int = 100, kwargs...)

Finds the indices of chunks (represented by embeddings in index) that are closest to query embedding (query_emb).

Returns only top_k closest indices.

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(
    finder::CosineSimilarity, emb::AbstractMatrix{<:Real},
     query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];
    top_k::Int = 100, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by embeddings in emb) that are closest (in cosine similarity for CosineSimilarity()) to query embedding (query_emb).

finder is the logic used for the similarity search. Default is CosineSimilarity.

If minimum_similarity is provided, only indices with similarity greater than or equal to it are returned. Similarity can be between -1 and 1 (-1 = completely opposite, 1 = exactly the same).

Returns only top_k closest indices.
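
Example

A small illustrative sketch (assuming column-wise embeddings and the documented two-value return of positions and scores):

julia
emb = rand(Float32, 128, 1_000)   # 1_000 chunk embeddings stored as columns
query_emb = rand(Float32, 128)
positions, scores = find_closest(CosineSimilarity(), emb, query_emb; top_k = 5, minimum_similarity = 0.0)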

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(
     finder::BM25Similarity, dtm::AbstractDocumentTermMatrix,
     query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];
    top_k::Int = 100, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by DocumentTermMatrix in dtm) that are closest to query tokens (query_tokens) using BM25.

Reference: Wikipedia: BM25. Implementation follows: The Next Generation of Lucene Relevance.

source


# PromptingTools.Experimental.RAGTools.find_closestFunction.
julia
find_closest(
    finder::BitPackedCosineSimilarity, emb::AbstractMatrix{<:Bool},
     query_emb::AbstractVector{<:Real}, query_tokens::AbstractVector{<:AbstractString} = String[];
    top_k::Int = 100, rescore_multiplier::Int = 4, minimum_similarity::AbstractFloat = -1.0, kwargs...)

Finds the indices of chunks (represented by embeddings in emb) that are closest to query embedding (query_emb) using bit-packed binary embeddings (in the index).

This is a two-pass approach:

  • First pass: Hamming distance in bit-packed binary form to get the top_k * rescore_multiplier (i.e., more than top_k) candidates.

  • Second pass: Rescore the candidates with float embeddings and return the top_k.

Returns only top_k closest indices.

Reference: HuggingFace: Embedding Quantization.

Examples

Convert any Float embeddings to bit-packed binary like this:

julia
bitpacked_emb = pack_bits(emb.>0)

source


# PromptingTools.Experimental.RAGTools.find_tagsMethod.
julia
find_tags(method::AnyTagFilter, index::AbstractChunkIndex,
     tag::Union{AbstractString, Regex}; kwargs...)
 
 find_tags(method::AnyTagFilter, index::AbstractChunkIndex,
    tags::Vector{T}; kwargs...) where {T <: Union{AbstractString, Regex}}

Finds the indices of chunks (represented by tags in index) that have ANY OF the specified tag or tags.
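
Example

A hedged sketch (assuming the index was built with a tagger so that its tags and tags_vocab are populated):

julia
candidates = find_tags(AnyTagFilter(), index, ["julia", "programming"])
candidates.positions  # chunks carrying at least one of the requested tags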

source


# PromptingTools.Experimental.RAGTools.find_tagsMethod.
julia
find_tags(method::AllTagFilter, index::AbstractChunkIndex,
     tag::Union{AbstractString, Regex}; kwargs...)
 
 find_tags(method::AllTagFilter, index::AbstractChunkIndex,
    tags::Vector{T}; kwargs...) where {T <: Union{AbstractString, Regex}}

Finds the indices of chunks (represented by tags in index) that have ALL OF the specified tag or tags.

source


# PromptingTools.Experimental.RAGTools.find_tagsMethod.
julia
find_tags(method::NoTagFilter, index::AbstractChunkIndex,
    tags::Union{T, AbstractVector{<:T}}; kwargs...) where {T <: Union{AbstractString, Regex, Nothing}}
    tags; kwargs...)

Returns all chunks in the index, ie, no filtering, so we simply return nothing (easier for dispatch).

source


# PromptingTools.Experimental.RAGTools.generate!Method.
julia
generate!(
     generator::AbstractGenerator, index::AbstractDocumentIndex, result::AbstractRAGResult;
     verbose::Integer = 1,
     api_kwargs::NamedTuple = NamedTuple(),
 result = retrieve(index, question)
 
 # Generate the answer using the default generator, mutates the same result
result = generate!(index, result)

source


# PromptingTools.Experimental.RAGTools.get_chunksMethod.
julia
get_chunks(chunker::AbstractChunker,
     files_or_docs::Vector{<:AbstractString};
     sources::AbstractVector{<:AbstractString} = files_or_docs,
     verbose::Bool = true,
    separators = ["\n\n", ". ", "\n", " "], max_length::Int = 256)

Chunks the provided files_or_docs into chunks of maximum length max_length (if possible with provided separators).

Supports two modes of operation:

  • chunker = FileChunker(): The function opens each file in files_or_docs and reads its contents.

  • chunker = TextChunker(): The function assumes that files_or_docs is a vector of strings to be chunked, you MUST provide corresponding sources.

Arguments

  • files_or_docs: A vector of valid file paths OR string documents to be chunked.

  • separators: A list of strings used as separators for splitting the text in each file into chunks. Default is ["\n\n", ". ", "\n", " "]. See recursive_splitter for more details.

  • max_length: The maximum length of each chunk (if possible with provided separators). Default is 256.

  • sources: A vector of strings indicating the source of each chunk. Default is equal to files_or_docs (for reader=:files)
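
Example

A minimal sketch for the TextChunker mode (strings already in memory, so sources must be provided explicitly; assumes the documented two-value return of chunks and their sources):

julia
docs = ["First document text to be split.", "Second document text."]
chunks, sources = get_chunks(TextChunker(), docs; sources = ["doc1", "doc2"], max_length = 64)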

source


# PromptingTools.Experimental.RAGTools.get_embeddingsMethod.
julia
get_embeddings(embedder::BatchEmbedder, docs::AbstractVector{<:AbstractString};
     verbose::Bool = true,
     model::AbstractString = PT.MODEL_EMBEDDING,
     truncate_dimension::Union{Int, Nothing} = nothing,
     cost_tracker = Threads.Atomic{Float64}(0.0),
     target_batch_size_length::Int = 80_000,
     ntasks::Int = 4 * Threads.nthreads(),
    kwargs...)

Embeds a vector of docs using the provided model (kwarg model) in a batched manner - BatchEmbedder.

BatchEmbedder tries to batch embedding calls for roughly 80K characters per call (to avoid exceeding the API rate limit) to reduce network latency.

Notes

  • docs are assumed to be already chunked to reasonable sizes that fit within the embedding context limit.

  • If you get errors about exceeding input sizes, first check the max_length in your chunks. If that does NOT resolve the issue, try reducing the target_batch_size_length parameter (eg, 10_000) and number of tasks ntasks=1. Some providers cannot handle large batch sizes.

Arguments

  • docs: A vector of strings to be embedded.

  • verbose: A boolean flag for verbose output. Default is true.

  • model: The model to use for embedding. Default is PT.MODEL_EMBEDDING.

  • truncate_dimension: The dimensionality of the embeddings to truncate to. Default is nothing, 0 will also do nothing.

  • cost_tracker: A Threads.Atomic{Float64} object to track the total cost of the API calls. Useful to pass the total cost to the parent call.

  • target_batch_size_length: The target length (in characters) of each batch of document chunks sent for embedding. Default is 80_000 characters. Speeds up embedding process.

  • ntasks: The number of tasks to use for asyncmap. Default is 4 * Threads.nthreads().
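
Example

A hedged sketch (assumes a configured embedding model/API key and that `chunks` is a vector of strings; the column-per-document layout is an assumption consistent with the retrieval utilities above):

julia
emb = get_embeddings(BatchEmbedder(), chunks; model = PT.MODEL_EMBEDDING, target_batch_size_length = 10_000)
size(emb)  # (embedding_dimension, length(chunks))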

source


# PromptingTools.Experimental.RAGTools.get_embeddingsMethod.
julia
get_embeddings(embedder::BinaryBatchEmbedder, docs::AbstractVector{<:AbstractString};
     verbose::Bool = true,
     model::AbstractString = PT.MODEL_EMBEDDING,
     truncate_dimension::Union{Int, Nothing} = nothing,
     cost_tracker = Threads.Atomic{Float64}(0.0),
     target_batch_size_length::Int = 80_000,
     ntasks::Int = 4 * Threads.nthreads(),
    kwargs...)

Embeds a vector of docs using the provided model (kwarg model) in a batched manner and then returns the binary embeddings matrix - BinaryBatchEmbedder.

BinaryBatchEmbedder tries to batch embedding calls for roughly 80K characters per call (to avoid exceeding the API rate limit) to reduce network latency.

Notes

  • docs are assumed to be already chunked to reasonable sizes that fit within the embedding context limit.

  • If you get errors about exceeding input sizes, first check the max_length in your chunks. If that does NOT resolve the issue, try reducing the target_batch_size_length parameter (eg, 10_000) and number of tasks ntasks=1. Some providers cannot handle large batch sizes.

Arguments

  • docs: A vector of strings to be embedded.

  • verbose: A boolean flag for verbose output. Default is true.

  • model: The model to use for embedding. Default is PT.MODEL_EMBEDDING.

  • truncate_dimension: The dimensionality of the embeddings to truncate to. Default is nothing.

  • return_type: The type of the returned embeddings matrix. Default is Matrix{Bool}. Choose BitMatrix to minimize storage requirements, Matrix{Bool} to maximize performance in elementwise-ops.

  • cost_tracker: A Threads.Atomic{Float64} object to track the total cost of the API calls. Useful to pass the total cost to the parent call.

  • target_batch_size_length: The target length (in characters) of each batch of document chunks sent for embedding. Default is 80_000 characters. Speeds up embedding process.

  • ntasks: The number of tasks to use for asyncmap. Default is 4 * Threads.nthreads().

source


# PromptingTools.Experimental.RAGTools.get_embeddingsMethod.
julia
get_embeddings(embedder::BitPackedBatchEmbedder, docs::AbstractVector{<:AbstractString};
     verbose::Bool = true,
     model::AbstractString = PT.MODEL_EMBEDDING,
     truncate_dimension::Union{Int, Nothing} = nothing,
     cost_tracker = Threads.Atomic{Float64}(0.0),
     target_batch_size_length::Int = 80_000,
     ntasks::Int = 4 * Threads.nthreads(),
    kwargs...)

Embeds a vector of docs using the provided model (kwarg model) in a batched manner and then returns the binary embeddings matrix represented in UInt64 (bit-packed) - BitPackedBatchEmbedder.

BitPackedBatchEmbedder tries to batch embedding calls for roughly 80K characters per call (to avoid exceeding the API rate limit) to reduce network latency.

The best option for FAST and MEMORY-EFFICIENT storage of embeddings; for retrieval, use BitPackedCosineSimilarity.

Notes

  • docs are assumed to be already chunked to reasonable sizes that fit within the embedding context limit.

  • If you get errors about exceeding input sizes, first check the max_length in your chunks. If that does NOT resolve the issue, try reducing the target_batch_size_length parameter (eg, 10_000) and number of tasks ntasks=1. Some providers cannot handle large batch sizes.

Arguments

  • docs: A vector of strings to be embedded.

  • verbose: A boolean flag for verbose output. Default is true.

  • model: The model to use for embedding. Default is PT.MODEL_EMBEDDING.

  • truncate_dimension: The dimensionality of the embeddings to truncate to. Default is nothing.

  • cost_tracker: A Threads.Atomic{Float64} object to track the total cost of the API calls. Useful to pass the total cost to the parent call.

  • target_batch_size_length: The target length (in characters) of each batch of document chunks sent for embedding. Default is 80_000 characters. Speeds up embedding process.

  • ntasks: The number of tasks to use for asyncmap. Default is 4 * Threads.nthreads().

See also: unpack_bits, pack_bits, BitPackedCosineSimilarity.

source


# PromptingTools.Experimental.RAGTools.get_tagsMethod.
julia
get_tags(tagger::NoTagger, docs::AbstractVector{<:AbstractString};
    kwargs...)

Simple no-op that skips any tagging of the documents

source


# PromptingTools.Experimental.RAGTools.get_tagsMethod.
julia
get_tags(tagger::OpenTagger, docs::AbstractVector{<:AbstractString};
     verbose::Bool = true,
     cost_tracker = Threads.Atomic{Float64}(0.0),
    kwargs...)

Extracts "tags" (metadata/keywords) from a vector of docs using the provided model (kwarg model).

Arguments

  • docs: A vector of strings to be embedded.

  • verbose: A boolean flag for verbose output. Default is true.

  • model: The model to use for tags extraction. Default is PT.MODEL_CHAT.

  • template: A template to be used for tags extraction. Default is :RAGExtractMetadataShort.

  • cost_tracker: A Threads.Atomic{Float64} object to track the total cost of the API calls. Useful to pass the total cost to the parent call.
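
Example

A hedged sketch (assumes a configured chat model/API key; the return is assumed to be one list of extracted tags per document, ready to be passed to build_tags):

julia
tags = get_tags(OpenTagger(), ["Julia is a fast language for technical computing."];
    model = PT.MODEL_CHAT, template = :RAGExtractMetadataShort)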

source


# PromptingTools.Experimental.RAGTools.get_tagsMethod.
julia
get_tags(tagger::PassthroughTagger, docs::AbstractVector{<:AbstractString};
     tags::AbstractVector{<:AbstractVector{<:AbstractString}},
    kwargs...)

Pass tags directly as Vector of Vectors of strings (ie, tags[i] is the tags for docs[i]). It then builds the vocabulary from the tags and returns both the tags in matrix form and the vocabulary.

source


# PromptingTools.Experimental.RAGTools.getpropertynestedFunction.
julia
getpropertynested(
     nt::NamedTuple, parent_keys::Vector{Symbol}, key::Symbol, default = nothing)

Get a property key from a nested NamedTuple nt, where the property is nested to a key in parent_keys.

Useful for nested kwargs where we want to get some property in parent_keys subset (eg, model in retriever_kwargs).

Examples

julia
kw = (; abc = (; def = "x"))
 getpropertynested(kw, [:abc], :def)
# Output: "x"

source


# PromptingTools.Experimental.RAGTools.hamming_distanceMethod.
julia
hamming_distance(
    mat::AbstractMatrix{T}, query::AbstractVector{T})::Vector{Int} where {T <: Integer}

Calculates the column-wise Hamming distance between a matrix of binary vectors mat and a single binary vector query.

This is the first-pass ranking for BinaryCosineSimilarity method.

Implementation from domluna's tinyRAG.
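
Example

A small sketch (Bool is an Integer subtype in Julia, so binary embeddings can be passed directly):

julia
mat = rand(Bool, 128, 1_000)        # 1_000 binary embeddings stored as columns
query = rand(Bool, 128)
dists = hamming_distance(mat, query)  # Vector{Int} of length 1_000; lower means closer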

source


# PromptingTools.Experimental.RAGTools.hcat_truncateMethod.
julia
hcat_truncate(matrices::AbstractVector{<:AbstractMatrix{T}},
     truncate_dimension::Union{Nothing, Int} = nothing; verbose::Bool = false) where {T <:
                                                                                      Real}

Horizontal concatenation of matrices, with optional truncation of the rows of each matrix to the specified dimension (reducing embedding dimensionality).

More efficient than simple splatting, as the resulting matrix is pre-allocated in one go.

Returns: a Matrix{Float32}

Arguments

  • matrices::AbstractVector{<:AbstractMatrix{T}}: Vector of matrices to concatenate

  • truncate_dimension::Union{Nothing,Int}=nothing: Dimension to truncate to, or nothing or 0 to skip truncation. If truncated, the columns will be normalized.

  • verbose::Bool=false: Whether to print verbose output.

Examples

julia
a = rand(Float32, 1000, 10)
 b = rand(Float32, 1000, 20)
 size(c) # (1000, 30)
 
 d = hcat_truncate([a, b], 500)
size(d) # (500, 30)

source


# PromptingTools.Experimental.RAGTools.load_textMethod.
julia
load_text(chunker::AbstractChunker, input;
    kwargs...)

Load text from input using the provided chunker. Called by get_chunks.

Available chunkers:

  • FileChunker: The function opens each file in input and reads its contents.

  • TextChunker: The function assumes that input is a vector of strings to be chunked, you MUST provide corresponding sources.

source


# PromptingTools.Experimental.RAGTools.merge_kwargs_nestedMethod.
julia
merge_kwargs_nested(nt1::NamedTuple, nt2::NamedTuple)

Merges two nested NamedTuples nt1 and nt2 recursively. The nt2 values will overwrite the nt1 values when overlapping.

Example

julia
kw = (; abc = (; def = "x"))
 kw2 = (; abc = (; def = "x", def2 = 2), new = 1)
merge_kwargs_nested(kw, kw2)

source


# PromptingTools.Experimental.RAGTools.pack_bitsMethod.
julia
pack_bits(arr::AbstractMatrix{<:Bool}) -> Matrix{UInt64}
 pack_bits(vect::AbstractVector{<:Bool}) -> Vector{UInt64}

Pack a matrix or vector of boolean values into a more compact representation using UInt64.

Arguments (Input)

  • arr::AbstractMatrix{<:Bool}: A matrix of boolean values where the number of rows must be divisible by 64.

Returns

  • For arr::AbstractMatrix{<:Bool}: Returns a matrix of UInt64 where each element represents 64 boolean values from the original matrix.

Examples

For vectors:

julia
bin = rand(Bool, 128)
 binint = pack_bits(bin)
 binx = unpack_bits(binint)
 @assert bin == binx

For matrices:

julia
bin = rand(Bool, 128, 10)
 binint = pack_bits(bin)
 binx = unpack_bits(binint)
@assert bin == binx

source


# PromptingTools.Experimental.RAGTools.permutation_step!Method.
julia
permutation_step!(
    result::RankGPTResult; rank_start::Integer = 1, rank_end::Integer = 100, kwargs...)

One sub-step of the RankGPT algorithm permutation ranking within the window of chunks defined by rank_start and rank_end positions.

source


# PromptingTools.Experimental.RAGTools.preprocess_tokensFunction.
julia
preprocess_tokens(text::AbstractString, stemmer=nothing; stopwords::Union{Nothing,Set{String}}=nothing, min_length::Int=3)

Preprocess provided text by removing numbers, punctuation, and applying stemming for BM25 search index.

Returns a list of preprocessed tokens.

Example

julia
stemmer = Snowball.Stemmer("english")
 stopwords = Set(["a", "an", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "no", "not", "of", "on", "or", "such", "some", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"])
 text = "This is a sample paragraph to test the functionality of your text preprocessor. It contains a mix of uppercase and lowercase letters, as well as punctuation marks such as commas, periods, and exclamation points! Let's see how your preprocessor handles quotes, like "this one", and also apostrophes, like in don't. Will it preserve the formatting of this paragraph, including the indentation and line breaks?"
preprocess_tokens(text, stemmer; stopwords)

source


# PromptingTools.Experimental.RAGTools.print_htmlMethod.
julia
print_html([io::IO,] parent_node::AbstractAnnotatedNode)
 
 print_html([io::IO,] rag::AbstractRAGResult; add_sources::Bool = false,
     add_scores::Bool = false, default_styler = HTMLStyler(),
 
 # or to accumulate more nodes
 io = IOBuffer()
print_html(io, parent_node)

source


# PromptingTools.Experimental.RAGTools.rank_gptMethod.
julia
rank_gpt(chunks::AbstractVector{<:AbstractString}, question::AbstractString;
     verbose::Int = 1, rank_start::Integer = 1, rank_end::Integer = 100,
     window_size::Integer = 20, step::Integer = 10,
    num_rounds::Integer = 1, model::String = "gpt4o", kwargs...)

Ranks the chunks based on their relevance for question. Returns the ranking permutation of the chunks in the order they are most relevant to the question (the first is the most relevant).

Example

julia
result = rank_gpt(chunks, question; rank_start=1, rank_end=25, window_size=8, step=4, num_rounds=3, model="gpt4o")

Reference

[1] Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agents by W. Sun et al. [2] RankGPT Github

source


# PromptingTools.Experimental.RAGTools.rank_sliding_window!Method.
julia
rank_sliding_window!(
     result::RankGPTResult; verbose::Int = 1, rank_start = 1, rank_end = 100,
    window_size = 20, step = 10, model::String = "gpt4o", kwargs...)

One single pass of the RankGPT algorithm permutation ranking across all positions between rank_start and rank_end.

source


# PromptingTools.Experimental.RAGTools.receive_permutation!Method.
julia
receive_permutation!(
     curr_rank::AbstractVector{<:Integer}, response::AbstractString;
    rank_start::Integer = 1, rank_end::Integer = 100)

Extracts and heals the permutation to contain all ranking positions.

source


# PromptingTools.Experimental.RAGTools.reciprocal_rank_fusionMethod.
julia
reciprocal_rank_fusion(args...; k::Int=60)

Merges multiple rankings and calculates the reciprocal rank score for each chunk (discounted by the inverse of the rank).

Example

julia
positions1 = [1, 3, 5, 7, 9]
 positions2 = [2, 4, 6, 8, 10]
 positions3 = [2, 4, 6, 11, 12]
 
-merged_positions, scores = reciprocal_rank_fusion(positions1, positions2, positions3)

source


# PromptingTools.Experimental.RAGTools.reciprocal_rank_fusionMethod.
julia
reciprocal_rank_fusion(
+merged_positions, scores = reciprocal_rank_fusion(positions1, positions2, positions3)

source


# PromptingTools.Experimental.RAGTools.reciprocal_rank_fusionMethod.
julia
reciprocal_rank_fusion(
     positions1::AbstractVector{<:Integer}, scores1::AbstractVector{<:T},
     positions2::AbstractVector{<:Integer},
     scores2::AbstractVector{<:T}; k::Int = 60) where {T <: Real}

Merges two sets of rankings and their joint scores. Calculates the reciprocal rank score for each chunk (discounted by the inverse of the rank).

Example

julia
positions1 = [1, 3, 5, 7, 9]
@@ -357,15 +357,15 @@
 positions2 = [2, 4, 6, 8, 10]
 scores2 = [0.5, 0.6, 0.7, 0.8, 0.9]
 
-merged, scores = reciprocal_rank_fusion(positions1, scores1, positions2, scores2; k = 60)

source


# PromptingTools.Experimental.RAGTools.refine!Method.
julia
refine!(
+merged, scores = reciprocal_rank_fusion(positions1, scores1, positions2, scores2; k = 60)

source


# PromptingTools.Experimental.RAGTools.refine!Method.
julia
refine!(
     refiner::NoRefiner, index::AbstractChunkIndex, result::AbstractRAGResult;
-    kwargs...)

Simple no-op function for refine!. It simply copies the result.answer and result.conversations[:answer] without any changes.

source


# PromptingTools.Experimental.RAGTools.refine!Method.
julia
refine!(
+    kwargs...)

Simple no-op function for refine!. It simply copies the result.answer and result.conversations[:answer] without any changes.

source


# PromptingTools.Experimental.RAGTools.refine!Method.
julia
refine!(
     refiner::SimpleRefiner, index::AbstractDocumentIndex, result::AbstractRAGResult;
     verbose::Bool = true,
     model::AbstractString = PT.MODEL_CHAT,
     template::Symbol = :RAGAnswerRefiner,
     cost_tracker = Threads.Atomic{Float64}(0.0),
-    kwargs...)

Give the model a chance to refine the answer (using the same or a different context than previously provided).

This method uses the same context as the original answer; however, it can be modified to do additional retrieval and use a different context.

Returns

  • Mutated result with result.final_answer and the full conversation saved in result.conversations[:final_answer]

Arguments

  • refiner::SimpleRefiner: The method to use for refining the answer. Uses aigenerate.

  • index::AbstractDocumentIndex: The index containing chunks and sources.

  • result::AbstractRAGResult: The result containing the context and question to generate the answer for.

  • model::AbstractString: The model to use for generating the answer. Defaults to PT.MODEL_CHAT.

  • verbose::Bool: If true, enables verbose logging.

  • template::Symbol: The template to use for the aigenerate function. Defaults to :RAGAnswerRefiner.

  • cost_tracker: An atomic counter to track the cost of the operation.

source


# PromptingTools.Experimental.RAGTools.refine!Method.
julia
refine!(
+    kwargs...)

Give the model a chance to refine the answer (using the same or a different context than previously provided).

This method uses the same context as the original answer; however, it can be modified to do additional retrieval and use a different context.

Returns

  • Mutated result with result.final_answer and the full conversation saved in result.conversations[:final_answer]

Arguments

  • refiner::SimpleRefiner: The method to use for refining the answer. Uses aigenerate.

  • index::AbstractDocumentIndex: The index containing chunks and sources.

  • result::AbstractRAGResult: The result containing the context and question to generate the answer for.

  • model::AbstractString: The model to use for generating the answer. Defaults to PT.MODEL_CHAT.

  • verbose::Bool: If true, enables verbose logging.

  • template::Symbol: The template to use for the aigenerate function. Defaults to :RAGAnswerRefiner.

  • cost_tracker: An atomic counter to track the cost of the operation.

source
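
A minimal usage sketch (hypothetical: it assumes an existing index, e.g., from build_index, and reuses the cfg.generator.refiner configuration pattern shown for TavilySearchRefiner below):

julia
# Sketch only: wire SimpleRefiner into the generation step of a RAG pipeline.
# Assumes `index` was built earlier and that the default generator exposes a
# `refiner` field, as in the TavilySearchRefiner example below.
cfg = RAGConfig()
cfg.generator.refiner = RT.SimpleRefiner()

result = airag(cfg, index; question = "What does this package do?", return_all = true)
result.final_answer  # refined answer; full exchange in result.conversations[:final_answer]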


# PromptingTools.Experimental.RAGTools.refine!Method.
julia
refine!(
     refiner::TavilySearchRefiner, index::AbstractDocumentIndex, result::AbstractRAGResult;
     verbose::Bool = true,
     model::AbstractString = PT.MODEL_CHAT,
@@ -380,13 +380,13 @@
 cfg.generator.refiner = RT.TavilySearchRefiner()
 
 result = airag(cfg, index; question, return_all = true)
-pprint(result)

source


# PromptingTools.Experimental.RAGTools.rephraseMethod.
julia
rephrase(rephraser::SimpleRephraser, question::AbstractString;
+pprint(result)

source


# PromptingTools.Experimental.RAGTools.rephraseMethod.
julia
rephrase(rephraser::SimpleRephraser, question::AbstractString;
     verbose::Bool = true,
     model::String = PT.MODEL_CHAT, template::Symbol = :RAGQueryHyDE,
-    cost_tracker = Threads.Atomic{Float64}(0.0))

Rephrases the question using the provided rephraser template = RAGQueryHyDE.

A special flavor of rephrasing that uses the HyDE (Hypothetical Document Embedding) method, which aims to find the documents most similar to a synthetic passage that would be a good answer to our question.

Returns both the original and the rephrased question.

Arguments

  • rephraser: Type that dictates the logic of rephrasing step.

  • question: The question to be rephrased.

  • model: The model to use for rephrasing. Default is PT.MODEL_CHAT.

  • template: The rephrasing template to use. Default is :RAGQueryHyDE. Find more with aitemplates("rephrase").

  • verbose: A boolean flag indicating whether to print verbose logging. Default is true.

source


# PromptingTools.Experimental.RAGTools.rephraseMethod.
julia
rephrase(rephraser::NoRephraser, question::AbstractString; kwargs...)

No-op, simple passthrough.

source


# PromptingTools.Experimental.RAGTools.rephraseMethod.
julia
rephrase(rephraser::SimpleRephraser, question::AbstractString;
+    cost_tracker = Threads.Atomic{Float64}(0.0))

Rephrases the question using the provided rephraser template = RAGQueryHyDE.

A special flavor of rephrasing that uses the HyDE (Hypothetical Document Embedding) method, which aims to find the documents most similar to a synthetic passage that would be a good answer to our question.

Returns both the original and the rephrased question.

Arguments

  • rephraser: Type that dictates the logic of rephrasing step.

  • question: The question to be rephrased.

  • model: The model to use for rephrasing. Default is PT.MODEL_CHAT.

  • template: The rephrasing template to use. Default is :RAGQueryHyDE. Find more with aitemplates("rephrase").

  • verbose: A boolean flag indicating whether to print verbose logging. Default is true.

source
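
For illustration, a hedged sketch following the signature printed above (the question text is made up):

julia
# HyDE-flavored rephrasing; returns both the original and the rephrased question
question = "What are the best practices for parallel computing in Julia?"
questions = rephrase(SimpleRephraser(), question; template = :RAGQueryHyDE)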


# PromptingTools.Experimental.RAGTools.rephraseMethod.
julia
rephrase(rephraser::NoRephraser, question::AbstractString; kwargs...)

No-op, simple passthrough.

source


# PromptingTools.Experimental.RAGTools.rephraseMethod.
julia
rephrase(rephraser::SimpleRephraser, question::AbstractString;
     verbose::Bool = true,
     model::String = PT.MODEL_CHAT, template::Symbol = :RAGQueryOptimizer,
-    cost_tracker = Threads.Atomic{Float64}(0.0), kwargs...)

Rephrases the question using the provided rephraser template.

Returns both the original and the rephrased question.

Arguments

  • rephraser: Type that dictates the logic of rephrasing step.

  • question: The question to be rephrased.

  • model: The model to use for rephrasing. Default is PT.MODEL_CHAT.

  • template: The rephrasing template to use. Default is :RAGQueryOptimizer. Find more with aitemplates("rephrase").

  • verbose: A boolean flag indicating whether to print verbose logging. Default is true.

source


# PromptingTools.Experimental.RAGTools.rerankMethod.
julia
rerank(
+    cost_tracker = Threads.Atomic{Float64}(0.0), kwargs...)

Rephrases the question using the provided rephraser template.

Returns both the original and the rephrased question.

Arguments

  • rephraser: Type that dictates the logic of rephrasing step.

  • question: The question to be rephrased.

  • model: The model to use for rephrasing. Default is PT.MODEL_CHAT.

  • template: The rephrasing template to use. Default is :RAGQueryOptimizer. Find more with aitemplates("rephrase").

  • verbose: A boolean flag indicating whether to print verbose logging. Default is true.

source
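
A minimal sketch with the default query-optimizer template (question is assumed to be defined as in the HyDE sketch above):

julia
questions = rephrase(SimpleRephraser(), question)  # template defaults to :RAGQueryOptimizer
# questions is expected to contain the original plus the rephrased variant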


# PromptingTools.Experimental.RAGTools.rerankMethod.
julia
rerank(
     reranker::CohereReranker, index::AbstractDocumentIndex, question::AbstractString,
     candidates::AbstractCandidateChunks;
     verbose::Bool = false,
@@ -395,7 +395,7 @@
     model::AbstractString = "rerank-english-v3.0",
     return_documents::Bool = false,
     cost_tracker = Threads.Atomic{Float64}(0.0),
-    kwargs...)

Re-ranks a list of candidate chunks using the Cohere Rerank API. See https://cohere.com/rerank for more details.

Arguments

  • reranker: Using Cohere API

  • index: The index that holds the underlying chunks to be re-ranked.

  • question: The query to be used for the search.

  • candidates: The candidate chunks to be re-ranked.

  • top_n: The number of most relevant documents to return. Default is length(documents).

  • model: The model to use for reranking. Default is rerank-english-v3.0.

  • return_documents: A boolean flag indicating whether to return the reranked documents in the response. Default is false.

  • verbose: A boolean flag indicating whether to print verbose logging. Default is false.

  • cost_tracker: An atomic counter to track the cost of the retrieval. Not implemented/tracked (cost unclear). Provided for consistency.

source


# PromptingTools.Experimental.RAGTools.rerankMethod.
julia
rerank(
+    kwargs...)

Re-ranks a list of candidate chunks using the Cohere Rerank API. See https://cohere.com/rerank for more details.

Arguments

  • reranker: Using Cohere API

  • index: The index that holds the underlying chunks to be re-ranked.

  • question: The query to be used for the search.

  • candidates: The candidate chunks to be re-ranked.

  • top_n: The number of most relevant documents to return. Default is length(documents).

  • model: The model to use for reranking. Default is rerank-english-v3.0.

  • return_documents: A boolean flag indicating whether to return the reranked documents in the response. Default is false.

  • verbose: A boolean flag indicating whether to print verbose logging. Default is false.

  • cost_tracker: An atomic counter to track the cost of the retrieval. Not implemented/tracked (cost unclear). Provided for consistency.

source
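
A hedged usage sketch (assumes a COHERE_API_KEY is configured and that index, question and candidates come from an earlier retrieval step):

julia
# Keep only the 5 most relevant chunks according to Cohere's reranker
reranked = rerank(CohereReranker(), index, question, candidates; top_n = 5)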


# PromptingTools.Experimental.RAGTools.rerankMethod.
julia
rerank(
     reranker::RankGPTReranker, index::AbstractDocumentIndex, question::AbstractString,
     candidates::AbstractCandidateChunks;
     api_key::AbstractString = PT.OPENAI_API_KEY,
@@ -408,7 +408,7 @@
 question = "What are the best practices for parallel computing in Julia?"
 
 cfg = RAGConfig(; retriever = SimpleRetriever(; reranker = RT.RankGPTReranker()))
-msg = airag(cfg, index; question, return_all = true)

To get full verbosity of logs, set verbose = 5 (anything higher than 3).

julia
msg = airag(cfg, index; question, return_all = true, verbose = 5)

Reference

[1] Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agents by W. Sun et al. [2] RankGPT Github

source


# PromptingTools.Experimental.RAGTools.retrieveMethod.
julia
retrieve(retriever::AbstractRetriever,
+msg = airag(cfg, index; question, return_all = true)

To get full verbosity of logs, set verbose = 5 (anything higher than 3).

julia
msg = airag(cfg, index; question, return_all = true, verbose = 5)

Reference

[1] Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agents by W. Sun et al. [2] RankGPT Github

source


# PromptingTools.Experimental.RAGTools.retrieveMethod.
julia
retrieve(retriever::AbstractRetriever,
     index::AbstractChunkIndex,
     question::AbstractString;
     verbose::Integer = 1,
@@ -449,7 +449,7 @@
     rephraser_kwargs = (; model = "custom"),
     embedder_kwargs = (; model = "custom"),
     tagger_kwargs = (; model = "custom"), api_kwargs = (;
-        url = "http://localhost:8080"))

source


# PromptingTools.Experimental.RAGTools.run_qa_evalsMethod.
julia
run_qa_evals(index::AbstractChunkIndex, qa_items::AbstractVector{<:QAEvalItem};
+        url = "http://localhost:8080"))

source


# PromptingTools.Experimental.RAGTools.run_qa_evalsMethod.
julia
run_qa_evals(index::AbstractChunkIndex, qa_items::AbstractVector{<:QAEvalItem};
     api_kwargs::NamedTuple = NamedTuple(),
     airag_kwargs::NamedTuple = NamedTuple(),
     qa_evals_kwargs::NamedTuple = NamedTuple(),
@@ -464,20 +464,20 @@
 results = filter(x->!isnothing(x.answer_score), results);
 
 # See average judge score
-mean(x->x.answer_score, results)

source


# PromptingTools.Experimental.RAGTools.run_qa_evalsMethod.
julia
run_qa_evals(qa_item::QAEvalItem, ctx::RAGResult; verbose::Bool = true,
+mean(x->x.answer_score, results)

source


# PromptingTools.Experimental.RAGTools.run_qa_evalsMethod.
julia
run_qa_evals(qa_item::QAEvalItem, ctx::RAGResult; verbose::Bool = true,
              parameters_dict::Dict{Symbol, <:Any}, judge_template::Symbol = :RAGJudgeAnswerFromContext,
              model_judge::AbstractString, api_kwargs::NamedTuple = NamedTuple()) -> QAEvalResult

Evaluates a single QAEvalItem using RAG details (RAGResult) and returns a QAEvalResult structure. This function assesses the relevance and accuracy of the answers generated in a QA evaluation context.

Arguments

  • qa_item::QAEvalItem: The QA evaluation item containing the question and its answer.

  • ctx::RAGResult: The RAG result used for generating the QA pair, including the original context and the answers. Comes from airag(...; return_context=true)

  • verbose::Bool: If true, enables verbose logging. Defaults to true.

  • parameters_dict::Dict{Symbol, Any}: Track any parameters used for later evaluations. Keys must be Symbols.

  • judge_template::Symbol: The template symbol for the AI model used to judge the answer. Defaults to :RAGJudgeAnswerFromContext.

  • model_judge::AbstractString: The AI model used for judging the answer's quality. Defaults to the standard chat model, but it is advisable to use a more powerful model such as GPT-4.

  • api_kwargs::NamedTuple: Parameters that will be forwarded to the API endpoint.

Returns

QAEvalResult: An evaluation result that includes various scores and metadata related to the QA evaluation.

Notes

  • The function computes a retrieval score and rank based on how well the context matches the QA context.

  • It then uses the judge_template and model_judge to score the answer's accuracy and relevance.

  • In case of errors during evaluation, the function logs a warning (if verbose is true) and the answer_score will be set to nothing.

Examples

Evaluating a QA pair using a specific context and model:

julia
qa_item = QAEvalItem(question="What is the capital of France?", answer="Paris", context="France is a country in Europe.")
 ctx = RAGResult(source="Wikipedia", context="France is a country in Europe.", answer="Paris")
 parameters_dict = Dict("param1" => "value1", "param2" => "value2")
 
-eval_result = run_qa_evals(qa_item, ctx, parameters_dict=parameters_dict, model_judge="MyAIJudgeModel")

source


# PromptingTools.Experimental.RAGTools.score_retrieval_hitMethod.

Returns 1.0 if context overlaps with, or is contained within, any of the candidate_context items.

source


# PromptingTools.Experimental.RAGTools.score_retrieval_rankMethod.

Returns the Integer rank of the position where context overlaps with, or is contained within, a candidate_context item.

source


# PromptingTools.Experimental.RAGTools.score_to_unit_scaleMethod.
julia
score_to_unit_scale(x::AbstractVector{T}) where T<:Real

Shift and scale a vector of scores to the unit scale [0, 1].

Example

julia
x = [1.0, 2.0, 3.0, 4.0, 5.0]
-scaled_x = score_to_unit_scale(x)

source


# PromptingTools.Experimental.RAGTools.set_node_style!Method.
julia
set_node_style!(::TrigramAnnotater, node::AnnotatedNode;
+eval_result = run_qa_evals(qa_item, ctx, parameters_dict=parameters_dict, model_judge="MyAIJudgeModel")

source


# PromptingTools.Experimental.RAGTools.score_retrieval_hitMethod.

Returns 1.0 if context overlaps with, or is contained within, any of the candidate_context items.

source


# PromptingTools.Experimental.RAGTools.score_retrieval_rankMethod.

Returns the Integer rank of the position where context overlaps with, or is contained within, a candidate_context item.

source


# PromptingTools.Experimental.RAGTools.score_to_unit_scaleMethod.
julia
score_to_unit_scale(x::AbstractVector{T}) where T<:Real

Shift and scale a vector of scores to the unit scale [0, 1].

Example

julia
x = [1.0, 2.0, 3.0, 4.0, 5.0]
+scaled_x = score_to_unit_scale(x)

source


# PromptingTools.Experimental.RAGTools.set_node_style!Method.
julia
set_node_style!(::TrigramAnnotater, node::AnnotatedNode;
     low_threshold::Float64 = 0.0, medium_threshold::Float64 = 0.5, high_threshold::Float64 = 1.0,
     default_styler::AbstractAnnotationStyler = Styler(),
     low_styler::AbstractAnnotationStyler = Styler(color = :magenta, bold = false),
     medium_styler::AbstractAnnotationStyler = Styler(color = :blue, bold = false),
     high_styler::AbstractAnnotationStyler = Styler(color = :nothing, bold = false),
-    bold_multihits::Bool = false)

Sets the style of node based on the provided rules.

source


# PromptingTools.Experimental.RAGTools.setpropertynestedMethod.
julia
setpropertynested(nt::NamedTuple, parent_keys::Vector{Symbol},
+    bold_multihits::Bool = false)

Sets the style of node based on the provided rules.

source


# PromptingTools.Experimental.RAGTools.setpropertynestedMethod.
julia
setpropertynested(nt::NamedTuple, parent_keys::Vector{Symbol},
     key::Symbol,
     value

)

Setter for a property key in a nested NamedTuple nt, where the property is nested under a key in parent_keys.

Useful for nested kwargs where we want to change some property in the parent_keys subset (eg, model in retriever_kwargs).

Examples

julia
kw = (; abc = (; def = "x"))
 setpropertynested(kw, [:abc], :def, "y")
@@ -487,12 +487,12 @@
     :model, "gpt4t")

Or changing an embedding model (across both indexer and retriever steps, because it's the same step name):

julia
kwargs = setpropertynested(
         kwargs, [:embedder_kwargs],
         :model, "text-embedding-3-large"
-    )

source


# PromptingTools.Experimental.RAGTools.split_into_code_and_sentencesMethod.
julia
split_into_code_and_sentences(input::Union{String, SubString{String}})

Splits text block into code or text and sub-splits into units.

If it is a code block, it splits by newline but keeps the group_id the same (so the units share the same source). If it is a text block, it splits into sentences, bullets, etc., and assigns a different group_id (so the units have different sources).

source


# PromptingTools.Experimental.RAGTools.tags_extractMethod.
julia
tags_extract(item::Tag)
+    )

source


# PromptingTools.Experimental.RAGTools.split_into_code_and_sentencesMethod.
julia
split_into_code_and_sentences(input::Union{String, SubString{String}})

Splits text block into code or text and sub-splits into units.

If it is a code block, it splits by newline but keeps the group_id the same (so the units share the same source). If it is a text block, it splits into sentences, bullets, etc., and assigns a different group_id (so the units have different sources).

source


# PromptingTools.Experimental.RAGTools.tags_extractMethod.
julia
tags_extract(item::Tag)
 tags_extract(tags::Vector{Tag})

Extracts the Tag item into a string of the form category:::value (lowercased and spaces replaced with underscores).

Example

julia
msg = aiextract(:RAGExtractMetadataShort; return_type=MaybeTags, text="I like package DataFrames", instructions="None.")
-metadata = tags_extract(msg.content.items)

source


# PromptingTools.Experimental.RAGTools.token_with_boundariesMethod.
julia
token_with_boundaries(
+metadata = tags_extract(msg.content.items)

source


# PromptingTools.Experimental.RAGTools.token_with_boundariesMethod.
julia
token_with_boundaries(
     prev_token::Union{Nothing, AbstractString}, curr_token::AbstractString,
-    next_token::Union{Nothing, AbstractString})

Joins the three tokens together. Useful to add boundary tokens (like spaces or brackets) to the curr_token to improve the matched context (ie, to separate partial matches from exact matches).

source


# PromptingTools.Experimental.RAGTools.tokenizeMethod.
julia
tokenize(input::Union{String, SubString{String}})

Tokenizes provided input by spaces, special characters or Julia symbols (eg, =>).

Unlike other tokenizers, it aims to be lossless - ie, it keeps both the separated text and the separators.

source


# PromptingTools.Experimental.RAGTools.translate_positions_to_parentMethod.
julia
translate_positions_to_parent(index::AbstractChunkIndex, positions::AbstractVector{<:Integer})

Translate positions to the parent index. Useful to convert between positions in a view and the original index.

Used whenever a chunkdata() is used to re-align positions in case index is a view.

source


# PromptingTools.Experimental.RAGTools.translate_positions_to_parentMethod.
julia
translate_positions_to_parent(
-    index::SubChunkIndex, pos::AbstractVector{<:Integer})

Translate positions to the parent index. Useful to convert between positions in a view and the original index.

Used whenever a chunkdata() or tags() are used to re-align positions to the "parent" index.

source


# PromptingTools.Experimental.RAGTools.trigram_support!Method.
julia
trigram_support!(parent_node::AnnotatedNode,
+    next_token::Union{Nothing, AbstractString})

Joins the three tokens together. Useful to add boundary tokens (like spaces or brackets) to the curr_token to improve the matched context (ie, to separate partial matches from exact matches).

source
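
An illustrative sketch (the exact joining behaviour is inferred from the description above):

julia
token_with_boundaries("(", "red", ")")     # expected: "(red)"
token_with_boundaries(nothing, "red", " ") # prev_token may be nothing at the start of text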


# PromptingTools.Experimental.RAGTools.tokenizeMethod.
julia
tokenize(input::Union{String, SubString{String}})

Tokenizes provided input by spaces, special characters or Julia symbols (eg, =>).

Unlike other tokenizers, it aims to be lossless - ie, it keeps both the separated text and the separators.

source
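
For illustration (the output in the comment is approximate; the point is that separators are kept):

julia
tokenize("key => value, done!")
# e.g. ["key", " ", "=>", " ", "value", ",", " ", "done", "!"]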


# PromptingTools.Experimental.RAGTools.translate_positions_to_parentMethod.
julia
translate_positions_to_parent(index::AbstractChunkIndex, positions::AbstractVector{<:Integer})

Translate positions to the parent index. Useful to convert between positions in a view and the original index.

Used whenever a chunkdata() is used to re-align positions in case index is a view.

source


# PromptingTools.Experimental.RAGTools.translate_positions_to_parentMethod.
julia
translate_positions_to_parent(
+    index::SubChunkIndex, pos::AbstractVector{<:Integer})

Translate positions to the parent index. Useful to convert between positions in a view and the original index.

Used whenever a chunkdata() or tags() are used to re-align positions to the "parent" index.

source


# PromptingTools.Experimental.RAGTools.trigram_support!Method.
julia
trigram_support!(parent_node::AnnotatedNode,
     context_trigrams::AbstractVector, trigram_func::F1 = trigrams, token_transform::F2 = identity;
     skip_trigrams::Bool = false, min_score::Float64 = 0.5,
     min_source_score::Float64 = 0.25,
@@ -501,7 +501,7 @@
 node = AnnotatedNode(content = "xyz")
 trigram_support!(node, context_trigrams) # updates node.children!
 ```
 
 
-[source](https://github.com/svilupp/PromptingTools.jl/blob/4823e00fbf65c00479468331022bb56ae4c48eae/src/Experimental/RAGTools/annotation.jl#L215-L244)
+[source](https://github.com/svilupp/PromptingTools.jl/blob/45f9b43becbd31601223824d0459e46e7d38b0d1/src/Experimental/RAGTools/annotation.jl#L215-L244)
 
 </div>
 <br>
@@ -512,12 +512,12 @@
 
 
 ```julia
-trigrams(input_string::AbstractString; add_word::AbstractString = "")

Splits the provided input_string into a vector of trigrams (combinations of three consecutive characters found in the input_string).

If add_word is provided, it is added to the resulting array. Useful to include the full word itself for exact matching.

source


# PromptingTools.Experimental.RAGTools.trigrams_hashedMethod.
julia
trigrams_hashed(input_string::AbstractString; add_word::AbstractString = "")

Splits the provided input_string into a Set of hashed trigrams (combinations of three consecutive characters found in the input_string).

It is more efficient for lookups in large strings (eg, >100K characters).

If add_word is provided, its hash is added to the resulting set. Useful to include the full word itself for exact matching.

source


# PromptingTools.last_messageMethod.
julia
PT.last_message(result::RAGResult)

Extract the last message from the RAGResult. It looks for final_answer first, then answer fields in the conversations dictionary. Returns nothing if not found.

source


# PromptingTools.last_outputMethod.

Extracts the last output (generated text answer) from the RAGResult.

source


# PromptingTools.pprintMethod.
julia
PromptingTools.pprint(
+trigrams(input_string::AbstractString; add_word::AbstractString = "")

Splits the provided input_string into a vector of trigrams (combinations of three consecutive characters found in the input_string).

If add_word is provided, it is added to the resulting array. Useful to include the full word itself for exact matching.

source
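
For example (assuming the straightforward sliding-window behaviour described above):

julia
trigrams("hello")                      # expected: ["hel", "ell", "llo"]
trigrams("hello"; add_word = "hello")  # expected: ["hel", "ell", "llo", "hello"]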


# PromptingTools.Experimental.RAGTools.trigrams_hashedMethod.
julia
trigrams_hashed(input_string::AbstractString; add_word::AbstractString = "")

Splits the provided input_string into a Set of hashed trigrams (combinations of three consecutive characters found in the input_string).

It is more efficient for lookups in large strings (eg, >100K characters).

If add_word is provided, its hash is added to the resulting set. Useful to include the full word itself for exact matching.

source
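
A hedged sketch of using the hashed variant for cheap overlap checks on large texts (long_document is any large string you supply; the hashing scheme itself is an implementation detail):

julia
doc_set   = trigrams_hashed(long_document)                  # Set of hashed trigrams
query_set = trigrams_hashed("refine"; add_word = "refine")
overlap   = length(intersect(query_set, doc_set))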


# PromptingTools.last_messageMethod.
julia
PT.last_message(result::RAGResult)

Extract the last message from the RAGResult. It looks for final_answer first, then answer fields in the conversations dictionary. Returns nothing if not found.

source
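
A quick sketch (result is an existing RAGResult):

julia
msg = PT.last_message(result)  # the message carrying the final (or plain) answer, or nothing
txt = PT.last_output(result)   # just the generated answer text (see last_output below)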


# PromptingTools.last_outputMethod.

Extracts the last output (generated text answer) from the RAGResult.

source


# PromptingTools.pprintMethod.
julia
PromptingTools.pprint(
     io::IO, node::AbstractAnnotatedNode;
-    text_width::Int = displaysize(io)[2], add_newline::Bool = true)

Pretty print the node to the io stream, including all its children.

Supports only node.style::Styler for now.

source


# PromptingTools.pprintMethod.
julia
PT.pprint(
+    text_width::Int = displaysize(io)[2], add_newline::Bool = true)

Pretty print the node to the io stream, including all its children.

Supports only node.style::Styler for now.

source


# PromptingTools.pprintMethod.
julia
PT.pprint(
     io::IO, r::AbstractRAGResult; add_context::Bool = false,
-    text_width::Int = displaysize(io)[2], annotater_kwargs...)

Pretty print the RAG result r to the given io stream.

If add_context is true, the context will be printed as well. The text_width parameter can be used to control the width of the output.

You can provide additional keyword arguments to the annotater, eg, add_sources, add_scores, min_score, etc. See annotate_support for more details.

source


- + text_width::Int = displaysize(io)[2], annotater_kwargs...)

Pretty print the RAG result r to the given io stream.

If add_context is true, the context will be printed as well. The text_width parameter can be used to control the width of the output.

You can provide additional keyword arguments to the annotater, eg, add_sources, add_scores, min_score, etc. See annotate_support for more details.

source
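
A usage sketch (result is a RAGResult returned by airag; the keyword values are illustrative):

julia
pprint(stdout, result; add_context = true, text_width = 100, add_scores = true)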


+ \ No newline at end of file