diff --git a/docs/src/assets/vscode-insiders.png b/docs/src/assets/vscode-insiders.png
deleted file mode 100644
index 27111d9916..0000000000
Binary files a/docs/src/assets/vscode-insiders.png and /dev/null differ
diff --git a/docs/src/assets/vscode-insiders.png.txt b/docs/src/assets/vscode-insiders.png.txt
deleted file mode 100644
index 110a98c9c9..0000000000
--- a/docs/src/assets/vscode-insiders.png.txt
+++ /dev/null
@@ -1 +0,0 @@
-Icon of Visual Studio Code Insiders featuring a green folded ribbon-like shape forming a stylized letter 'V' on a white background, highlighted with a red rectangle indicating selection or focus.
diff --git a/docs/src/assets/vscode-language-models-select.png b/docs/src/assets/vscode-language-models-select.png
new file mode 100644
index 0000000000..3adcfb36c5
Binary files /dev/null and b/docs/src/assets/vscode-language-models-select.png differ
diff --git a/docs/src/assets/vscode-language-models-select.png.txt b/docs/src/assets/vscode-language-models-select.png.txt
new file mode 100644
index 0000000000..2901a6cc5b
--- /dev/null
+++ b/docs/src/assets/vscode-language-models-select.png.txt
@@ -0,0 +1 @@
+A dropdown menu titled 'Pick a Language Chat Model for openai:gpt-4' with several options including 'GPT 3.5 Turbo', 'GPT 4', 'GPT 4 Turbo (2024-01-25 Preview)', and 'GPT 4o (2024-05-13)', with 'GPT 3.5 Turbo' currently highlighted.
diff --git a/docs/src/assets/vscode-language-models.png b/docs/src/assets/vscode-language-models.png
new file mode 100644
index 0000000000..332e01b407
Binary files /dev/null and b/docs/src/assets/vscode-language-models.png differ
diff --git a/docs/src/assets/vscode-language-models.png.txt b/docs/src/assets/vscode-language-models.png.txt
new file mode 100644
index 0000000000..862c36cab2
--- /dev/null
+++ b/docs/src/assets/vscode-language-models.png.txt
@@ -0,0 +1 @@
+Screenshot of a Visual Studio Code interface showing a dropdown menu with options to configure a language model for OpenAI's GPT-4, including an option for Visual Studio Language Chat Models and using a registered LLM such as GitHub Copilot.
diff --git a/docs/src/assets/vscode-select-llm.png b/docs/src/assets/vscode-select-llm.png
deleted file mode 100644
index 63c2c02517..0000000000
Binary files a/docs/src/assets/vscode-select-llm.png and /dev/null differ
diff --git a/docs/src/assets/vscode-select-llm.png.txt b/docs/src/assets/vscode-select-llm.png.txt
deleted file mode 100644
index a3e2ed56c6..0000000000
--- a/docs/src/assets/vscode-select-llm.png.txt
+++ /dev/null
@@ -1 +0,0 @@
-A screenshot of a Visual Studio Code interface with a dialog box titled 'Pick a Language Model'. The dialog box contains a search bar and a list of options including 'Configure .env file', 'Copilot: copilot-gpt-3.5-turbo', and 'Copilot: copilot-gpt-4'. The 'Configure .env file' option is highlighted.
diff --git a/docs/src/content/docs/getting-started/configuration.mdx b/docs/src/content/docs/getting-started/configuration.mdx
index 5187ed793c..762b1d0576 100644
--- a/docs/src/content/docs/getting-started/configuration.mdx
+++ b/docs/src/content/docs/getting-started/configuration.mdx
@@ -11,11 +11,11 @@ import { Steps } from "@astrojs/starlight/components"
import { Tabs, TabItem } from "@astrojs/starlight/components"
import { Image } from "astro:assets"
-import insidersSrc from "../../../assets/vscode-insiders.png"
-import insidersAlt from "../../../assets/vscode-insiders.png.txt?raw"
+import lmSrc from "../../../assets/vscode-language-models.png"
+import lmAlt from "../../../assets/vscode-language-models.png.txt?raw"
-import selectLLMSrc from "../../../assets/vscode-select-llm.png"
-import selectLLMAlt from "../../../assets/vscode-select-llm.png.txt?raw"
+import lmSelectSrc from "../../../assets/vscode-language-models-select.png"
+import lmSelectAlt from "../../../assets/vscode-language-models-select.png.txt?raw"
You will need to configure the LLM connection and authorizion secrets.
@@ -269,6 +269,42 @@ script({
+## GitHub Copilot Models
+
+If you have access to **GitHub Copilot in Visual Studio Code**,
+GenAIScript will be able to leverage those [language models](https://code.visualstudio.com/api/extension-guides/language-model) as well.
+
+This mode is useful for running your scripts without a separate LLM provider or local LLMs. However, those models are not available from the command line
+and have additional limitations and rate limiting defined by the GitHub Copilot platform.
+
+There is no configuration needed as long as you have GitHub Copilot installed and configured in Visual Studio Code.
+
+
+
+
+
+- run your script
+-
+select the **Visual Studio Code Language Chat Models** option when configuring the model
+
+
+
+(This step is skipped if you already have mappings in your settings)
+
+
+-
+select the best chat model that matches the one you have in your script
+
+
+
+
+
+
+
+
+
+The mapping of GenAIScript model names to Visual Studio Code Language Chat Models is stored in the settings.
+
## Local Models
There are many projects that allow you to run models locally on your machine,
diff --git a/docs/src/content/docs/reference/token.md b/docs/src/content/docs/reference/token.md
index 34fc82e3a5..d6ce5d1a82 100644
--- a/docs/src/content/docs/reference/token.md
+++ b/docs/src/content/docs/reference/token.md
@@ -10,7 +10,7 @@ GenAIScript will try to find the connection token from various sources:
- a `.env` file in the root of your project (VSCode and CLI)
- environment variables, typically within your CI/CD environment (CLI only)
-- Visual Studio Language Models (VSCode only)
+- Visual Studio Language Chat Models (VSCode only)
## .env file or process environment
diff --git a/package.json b/package.json
index b5fefa830c..ae89c73464 100644
--- a/package.json
+++ b/package.json
@@ -59,7 +59,7 @@
"gen:licenses": "npx --yes generate-license-file --input ./package.json --output ./THIRD_PARTY_LICENSES.md --overwrite",
"genai:technical": "cd docs && yarn genai:technical",
"genai:frontmatter": "cd docs && yarn genai:frontmatter",
- "genai:alt": "cd docs && yarn genai:alt",
+ "genai:alt": "cd docs && yarn genai:alt-text",
"genai:test": "node packages/cli/built/genaiscript.cjs run test-gen"
},
"release-it": {
diff --git a/packages/cli/src/nodehost.ts b/packages/cli/src/nodehost.ts
index f1533957da..7623577bac 100644
--- a/packages/cli/src/nodehost.ts
+++ b/packages/cli/src/nodehost.ts
@@ -157,7 +157,6 @@ export class NodeHost implements RuntimeHost {
tok.token = "Bearer " + this._azureToken
}
if (!tok && this.clientLanguageModel) {
- logVerbose(`model: using client language model`)
return {
model: modelId,
provider: this.clientLanguageModel.id,
diff --git a/packages/cli/src/server.ts b/packages/cli/src/server.ts
index 36794eb1f0..2a3a16d75f 100644
--- a/packages/cli/src/server.ts
+++ b/packages/cli/src/server.ts
@@ -107,8 +107,9 @@ export async function startServer(options: { port: string }) {
// add handler
const chatId = randomHex(6)
chats[chatId] = async (chunk) => {
- if (!responseSoFar) {
- trace.itemValue("model", chunk.model)
+ if (!responseSoFar && chunk.model) {
+ logVerbose(`visual studio: chat model ${chunk.model}`)
+ trace.itemValue("chat model", chunk.model)
trace.appendContent("\n\n")
}
trace.appendToken(chunk.chunk)
diff --git a/packages/sample/.vscode/settings.json b/packages/sample/.vscode/settings.json
index a95fcadd5b..c6b2c2e0a9 100644
--- a/packages/sample/.vscode/settings.json
+++ b/packages/sample/.vscode/settings.json
@@ -4,5 +4,8 @@
"openai",
"outputfilename"
],
- "genaiscript.cli.path": "../cli/built/genaiscript.cjs"
+ "genaiscript.cli.path": "../cli/built/genaiscript.cjs",
+ "genaiscript.languageChatModels": {
+ "openai:gpt-4": "github.copilot-chat/4/gpt-4o-2024-05-13"
+ }
}
\ No newline at end of file
diff --git a/packages/vscode/package.json b/packages/vscode/package.json
index 633e6ff222..6865e30619 100644
--- a/packages/vscode/package.json
+++ b/packages/vscode/package.json
@@ -234,6 +234,10 @@
{
"title": "GenAIScript",
"properties": {
+ "genaiscript.languageChatModels": {
+ "type": "object",
+ "description": "Mapping from GenAIScript model (openai:gpt-4) to Visual Studio Code Language Chat Model (github...)"
+ },
"genaiscript.diagnostics": {
"type": "boolean",
"default": false,
diff --git a/packages/vscode/src/lmaccess.ts b/packages/vscode/src/lmaccess.ts
index 3ba4b9ca16..fc506f8c2a 100644
--- a/packages/vscode/src/lmaccess.ts
+++ b/packages/vscode/src/lmaccess.ts
@@ -1,8 +1,6 @@
/* eslint-disable @typescript-eslint/naming-convention */
import * as vscode from "vscode"
-import { AIRequestOptions, ExtensionState } from "./state"
-import { isApiProposalEnabled } from "./proposals"
-import { LanguageModel } from "../../core/src/chat"
+import { ExtensionState } from "./state"
import {
MODEL_PROVIDER_OLLAMA,
MODEL_PROVIDER_LLAMAFILE,
@@ -34,7 +32,8 @@ async function generateLanguageModelConfiguration(
return { provider }
}
- if (Object.keys(state.languageChatModels).length)
+ const languageChatModels = await state.languageChatModels()
+ if (Object.keys(languageChatModels).length)
return { provider: MODEL_PROVIDER_CLIENT, model: "*" }
const items: (vscode.QuickPickItem & {
@@ -46,8 +45,8 @@ async function generateLanguageModelConfiguration(
const models = await vscode.lm.selectChatModels()
if (models.length)
items.push({
- label: "Visual Studio Language Models",
- detail: `Use a registered Language Model (e.g. GitHub Copilot).`,
+ label: "Visual Studio Language Chat Models",
+ detail: `Use a registered LLM such as GitHub Copilot.`,
model: "*",
provider: MODEL_PROVIDER_CLIENT,
})
@@ -104,8 +103,8 @@ async function pickChatModel(
model: string
): Promise {
const chatModels = await vscode.lm.selectChatModels()
-
- const chatModelId = state.languageChatModels[model]
+ const languageChatModels = await state.languageChatModels()
+ const chatModelId = languageChatModels[model]
let chatModel = chatModelId && chatModels.find((m) => m.id === chatModelId)
if (!chatModel) {
const items: (vscode.QuickPickItem & {
@@ -117,10 +116,10 @@ async function pickChatModel(
chatModel,
}))
const res = await vscode.window.showQuickPick(items, {
- title: `Pick a Chat Model for ${model}`,
+ title: `Pick a Language Chat Model for ${model}`,
})
chatModel = res?.chatModel
- if (chatModel) state.languageChatModels[model] = chatModel.id
+ if (chatModel) await state.updateLanguageChatModels(model, chatModel.id)
}
return chatModel
}
diff --git a/packages/vscode/src/servermanager.ts b/packages/vscode/src/servermanager.ts
index c717a94243..8890b46178 100644
--- a/packages/vscode/src/servermanager.ts
+++ b/packages/vscode/src/servermanager.ts
@@ -39,7 +39,7 @@ export class TerminalServerManager implements ServerManager {
)
subscriptions.push(
vscode.workspace.onDidChangeConfiguration((e) => {
- if (e.affectsConfiguration(TOOL_ID)) this.close()
+ if (e.affectsConfiguration(TOOL_ID + ".cli")) this.close()
})
)
diff --git a/packages/vscode/src/state.ts b/packages/vscode/src/state.ts
index cd9d58a9bb..46804c814c 100644
--- a/packages/vscode/src/state.ts
+++ b/packages/vscode/src/state.ts
@@ -107,8 +107,6 @@ export class ExtensionState extends EventTarget {
AIRequestSnapshot
> = undefined
readonly output: vscode.LogOutputChannel
- // modelid -> vscode language mode id
- languageChatModels: Record = {}
constructor(public readonly context: ExtensionContext) {
super()
@@ -138,15 +136,26 @@ export class ExtensionState extends EventTarget {
subscriptions
)
)
- if (
- typeof vscode.lm !== "undefined" &&
- typeof vscode.lm.onDidChangeChatModels === "function"
- )
- subscriptions.push(
- vscode.lm.onDidChangeChatModels(
- () => (this.languageChatModels = {})
- )
- )
+ }
+
+ async updateLanguageChatModels(model: string, chatModel: string) {
+ const res = await this.languageChatModels()
+ if (res[model] !== chatModel) {
+ if (chatModel === undefined) delete res[model]
+ else res[model] = chatModel
+ const config = vscode.workspace.getConfiguration(TOOL_ID)
+ await config.update("languageChatModels", res)
+ }
+ }
+
+ async languageChatModels() {
+ const config = vscode.workspace.getConfiguration(TOOL_ID)
+ const res =
+ ((await config.get("languageChatModels")) as Record<
+ string,
+ string
+ >) || {}
+ return res
}
private async saveScripts() {