From b69dce36801eeadb6e657b5651305ae3cad6b236 Mon Sep 17 00:00:00 2001 From: BrandonStudio <55647556+BrandonStudio@users.noreply.github.com> Date: Wed, 18 Dec 2024 12:25:09 +0800 Subject: [PATCH 1/3] =?UTF-8?q?=F0=9F=90=9B=20fix:=20Fix=20GitHub=20model?= =?UTF-8?q?=20fetch=20(#4645)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix GitHub models Refactor OpenAICompatibleFactory * Restore unnecessary change * Complement test --- src/config/modelProviders/github.ts | 29 +++-- src/libs/agent-runtime/github/index.test.ts | 109 +++++++++++------- src/libs/agent-runtime/github/index.ts | 52 ++++++++- src/libs/agent-runtime/togetherai/index.ts | 3 +- .../utils/openaiCompatibleFactory/index.ts | 19 +-- 5 files changed, 150 insertions(+), 62 deletions(-) diff --git a/src/config/modelProviders/github.ts b/src/config/modelProviders/github.ts index 0394b2b64d88..e23b0a22b05c 100644 --- a/src/config/modelProviders/github.ts +++ b/src/config/modelProviders/github.ts @@ -15,7 +15,8 @@ const Github: ModelProviderCard = { vision: true, }, { - description: '专注于高级推理和解决复杂问题,包括数学和科学任务。非常适合需要深度上下文理解和自主工作流程的应用。', + description: + '专注于高级推理和解决复杂问题,包括数学和科学任务。非常适合需要深度上下文理解和自主工作流程的应用。', displayName: 'OpenAI o1-preview', enabled: true, functionCall: false, @@ -45,7 +46,8 @@ const Github: ModelProviderCard = { vision: true, }, { - description: '一个52B参数(12B活跃)的多语言模型,提供256K长上下文窗口、函数调用、结构化输出和基于事实的生成。', + description: + '一个52B参数(12B活跃)的多语言模型,提供256K长上下文窗口、函数调用、结构化输出和基于事实的生成。', displayName: 'AI21 Jamba 1.5 Mini', functionCall: true, id: 'ai21-jamba-1.5-mini', @@ -53,7 +55,8 @@ const Github: ModelProviderCard = { tokens: 262_144, }, { - description: '一个398B参数(94B活跃)的多语言模型,提供256K长上下文窗口、函数调用、结构化输出和基于事实的生成。', + description: + '一个398B参数(94B活跃)的多语言模型,提供256K长上下文窗口、函数调用、结构化输出和基于事实的生成。', displayName: 'AI21 Jamba 1.5 Large', functionCall: true, id: 'ai21-jamba-1.5-large', @@ -61,7 +64,8 @@ const Github: ModelProviderCard = { tokens: 262_144, }, { - description: 'Command R是一个可扩展的生成模型,旨在针对RAG和工具使用,使企业能够实现生产级AI。', + description: + 'Command R是一个可扩展的生成模型,旨在针对RAG和工具使用,使企业能够实现生产级AI。', displayName: 'Cohere Command R', id: 'cohere-command-r', maxOutput: 4096, @@ -75,7 +79,8 @@ const Github: ModelProviderCard = { tokens: 131_072, }, { - description: 'Mistral Nemo是一种尖端的语言模型(LLM),在其尺寸类别中拥有最先进的推理、世界知识和编码能力。', + description: + 'Mistral Nemo是一种尖端的语言模型(LLM),在其尺寸类别中拥有最先进的推理、世界知识和编码能力。', displayName: 'Mistral Nemo', id: 'mistral-nemo', maxOutput: 4096, @@ -89,7 +94,8 @@ const Github: ModelProviderCard = { tokens: 131_072, }, { - description: 'Mistral的旗舰模型,适合需要大规模推理能力或高度专业化的复杂任务(合成文本生成、代码生成、RAG或代理)。', + description: + 'Mistral的旗舰模型,适合需要大规模推理能力或高度专业化的复杂任务(合成文本生成、代码生成、RAG或代理)。', displayName: 'Mistral Large', id: 'mistral-large', maxOutput: 4096, @@ -112,21 +118,24 @@ const Github: ModelProviderCard = { vision: true, }, { - description: 'Llama 3.1指令调优的文本模型,针对多语言对话用例进行了优化,在许多可用的开源和封闭聊天模型中,在常见行业基准上表现优异。', + description: + 'Llama 3.1指令调优的文本模型,针对多语言对话用例进行了优化,在许多可用的开源和封闭聊天模型中,在常见行业基准上表现优异。', displayName: 'Meta Llama 3.1 8B', id: 'meta-llama-3.1-8b-instruct', maxOutput: 4096, tokens: 131_072, }, { - description: 'Llama 3.1指令调优的文本模型,针对多语言对话用例进行了优化,在许多可用的开源和封闭聊天模型中,在常见行业基准上表现优异。', + description: + 'Llama 3.1指令调优的文本模型,针对多语言对话用例进行了优化,在许多可用的开源和封闭聊天模型中,在常见行业基准上表现优异。', displayName: 'Meta Llama 3.1 70B', id: 'meta-llama-3.1-70b-instruct', maxOutput: 4096, tokens: 131_072, }, { - description: 'Llama 3.1指令调优的文本模型,针对多语言对话用例进行了优化,在许多可用的开源和封闭聊天模型中,在常见行业基准上表现优异。', + description: + 'Llama 
3.1指令调优的文本模型,针对多语言对话用例进行了优化,在许多可用的开源和封闭聊天模型中,在常见行业基准上表现优异。', displayName: 'Meta Llama 3.1 405B', id: 'meta-llama-3.1-405b-instruct', maxOutput: 4096, @@ -209,7 +218,7 @@ const Github: ModelProviderCard = { description: '通过GitHub模型,开发人员可以成为AI工程师,并使用行业领先的AI模型进行构建。', enabled: true, id: 'github', - // modelList: { showModelFetcher: true }, + modelList: { showModelFetcher: true }, // I'm not sure if it is good to show the model fetcher, as remote list is not complete. name: 'GitHub', url: 'https://github.com/marketplace/models', }; diff --git a/src/libs/agent-runtime/github/index.test.ts b/src/libs/agent-runtime/github/index.test.ts index e466ac155389..346345b170de 100644 --- a/src/libs/agent-runtime/github/index.test.ts +++ b/src/libs/agent-runtime/github/index.test.ts @@ -21,15 +21,10 @@ let instance: LobeOpenAICompatibleRuntime; beforeEach(() => { instance = new LobeGithubAI({ apiKey: 'test' }); - - // Use vi.spyOn to mock the chat.completions.create method - vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue( - new ReadableStream() as any, - ); }); afterEach(() => { - vi.clearAllMocks(); + vi.restoreAllMocks(); }); describe('LobeGithubAI', () => { @@ -42,6 +37,13 @@ describe('LobeGithubAI', () => { }); describe('chat', () => { + beforeEach(() => { + // Use vi.spyOn to mock the chat.completions.create method + vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue( + new ReadableStream() as any, + ); + }); + describe('Error', () => { it('should return GithubBizError with an openai error response when OpenAI.APIError is thrown', async () => { // Arrange @@ -119,41 +121,6 @@ describe('LobeGithubAI', () => { } }); - it('should return GithubBizError with an cause response with desensitize Url', async () => { - // Arrange - const errorInfo = { - stack: 'abc', - cause: { message: 'api is undefined' }, - }; - const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {}); - - instance = new LobeGithubAI({ - apiKey: 'test', - baseURL: 'https://api.abc.com/v1', - }); - - vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError); - - // Act - try { - await instance.chat({ - messages: [{ content: 'Hello', role: 'user' }], - model: 'meta-llama-3-70b-instruct', - temperature: 0.7, - }); - } catch (e) { - expect(e).toEqual({ - endpoint: 'https://api.***.com/v1', - error: { - cause: { message: 'api is undefined' }, - stack: 'abc', - }, - errorType: bizErrorType, - provider, - }); - } - }); - it('should throw an InvalidGithubToken error type on 401 status code', async () => { // Mock the API call to simulate a 401 error const error = new Error('InvalidApiKey') as any; @@ -243,4 +210,64 @@ describe('LobeGithubAI', () => { }); }); }); + + describe('models', () => { + beforeEach(() => {}); + + it('should return a list of models', async () => { + // Arrange + const arr = [ + { + id: 'azureml://registries/azureml-ai21/models/AI21-Jamba-Instruct/versions/2', + name: 'AI21-Jamba-Instruct', + friendly_name: 'AI21-Jamba-Instruct', + model_version: 2, + publisher: 'AI21 Labs', + model_family: 'AI21 Labs', + model_registry: 'azureml-ai21', + license: 'custom', + task: 'chat-completion', + description: + "Jamba-Instruct is the world's first production-grade Mamba-based LLM model and leverages its hybrid Mamba-Transformer architecture to achieve best-in-class performance, quality, and cost efficiency.\n\n**Model Developer Name**: _AI21 Labs_\n\n## Model Architecture\n\nJamba-Instruct leverages a hybrid Mamba-Transformer architecture to achieve 
best-in-class performance, quality, and cost efficiency.\nAI21's Jamba architecture features a blocks-and-layers approach that allows Jamba to successfully integrate the two architectures. Each Jamba block contains either an attention or a Mamba layer, followed by a multi-layer perceptron (MLP), producing an overall ratio of one Transformer layer out of every eight total layers.\n", + summary: + "Jamba-Instruct is the world's first production-grade Mamba-based LLM model and leverages its hybrid Mamba-Transformer architecture to achieve best-in-class performance, quality, and cost efficiency.", + tags: ['chat', 'rag'], + }, + { + id: 'azureml://registries/azureml-cohere/models/Cohere-command-r/versions/3', + name: 'Cohere-command-r', + friendly_name: 'Cohere Command R', + model_version: 3, + publisher: 'cohere', + model_family: 'cohere', + model_registry: 'azureml-cohere', + license: 'custom', + task: 'chat-completion', + description: + "Command R is a highly performant generative large language model, optimized for a variety of use cases including reasoning, summarization, and question answering. \n\nThe model is optimized to perform well in the following languages: English, French, Spanish, Italian, German, Brazilian Portuguese, Japanese, Korean, Simplified Chinese, and Arabic.\n\nPre-training data additionally included the following 13 languages: Russian, Polish, Turkish, Vietnamese, Dutch, Czech, Indonesian, Ukrainian, Romanian, Greek, Hindi, Hebrew, Persian.\n\n## Resources\n\nFor full details of this model, [release blog post](https://aka.ms/cohere-blog).\n\n## Model Architecture\n\nThis is an auto-regressive language model that uses an optimized transformer architecture. After pretraining, this model uses supervised fine-tuning (SFT) and preference training to align model behavior to human preferences for helpfulness and safety.\n\n### Tool use capabilities\n\nCommand R has been specifically trained with conversational tool use capabilities. These have been trained into the model via a mixture of supervised fine-tuning and preference fine-tuning, using a specific prompt template. Deviating from this prompt template will likely reduce performance, but we encourage experimentation.\n\nCommand R's tool use functionality takes a conversation as input (with an optional user-system preamble), along with a list of available tools. The model will then generate a json-formatted list of actions to execute on a subset of those tools. Command R may use one of its supplied tools more than once.\n\nThe model has been trained to recognise a special directly_answer tool, which it uses to indicate that it doesn't want to use any of its other tools. The ability to abstain from calling a specific tool can be useful in a range of situations, such as greeting a user, or asking clarifying questions. We recommend including the directly_answer tool, but it can be removed or renamed if required.\n\n### Grounded Generation and RAG Capabilities\n\nCommand R has been specifically trained with grounded generation capabilities. This means that it can generate responses based on a list of supplied document snippets, and it will include grounding spans (citations) in its response indicating the source of the information. This can be used to enable behaviors such as grounded summarization and the final step of Retrieval Augmented Generation (RAG).This behavior has been trained into the model via a mixture of supervised fine-tuning and preference fine-tuning, using a specific prompt template. 
Deviating from this prompt template may reduce performance, but we encourage experimentation.\n\nCommand R's grounded generation behavior takes a conversation as input (with an optional user-supplied system preamble, indicating task, context and desired output style), along with a list of retrieved document snippets. The document snippets should be chunks, rather than long documents, typically around 100-400 words per chunk. Document snippets consist of key-value pairs. The keys should be short descriptive strings, the values can be text or semi-structured.\n\nBy default, Command R will generate grounded responses by first predicting which documents are relevant, then predicting which ones it will cite, then generating an answer. Finally, it will then insert grounding spans into the answer. See below for an example. This is referred to as accurate grounded generation.\n\nThe model is trained with a number of other answering modes, which can be selected by prompt changes . A fast citation mode is supported in the tokenizer, which will directly generate an answer with grounding spans in it, without first writing the answer out in full. This sacrifices some grounding accuracy in favor of generating fewer tokens.\n\n### Code Capabilities\n\nCommand R has been optimized to interact with your code, by requesting code snippets, code explanations, or code rewrites. It might not perform well out-of-the-box for pure code completion. For better performance, we also recommend using a low temperature (and even greedy decoding) for code-generation related instructions.\n", + summary: + 'Command R is a scalable generative model targeting RAG and Tool Use to enable production-scale AI for enterprise.', + tags: ['rag', 'multilingual'], + }, + ]; + vi.spyOn(instance['client'].models, 'list').mockResolvedValue({ + body: arr, + } as any); + + // Act & Assert + const models = await instance.models(); + + const modelsCount = models.length; + expect(modelsCount).toBe(arr.length); + + for (let i = 0; i < arr.length; i++) { + const model = models[i]; + expect(model).toEqual({ + description: arr[i].description, + displayName: arr[i].friendly_name, + id: arr[i].name, + }); + } + }); + }); }); diff --git a/src/libs/agent-runtime/github/index.ts b/src/libs/agent-runtime/github/index.ts index 2612bc6fe697..7081a73043ef 100644 --- a/src/libs/agent-runtime/github/index.ts +++ b/src/libs/agent-runtime/github/index.ts @@ -1,7 +1,35 @@ +import { LOBE_DEFAULT_MODEL_LIST } from '@/config/modelProviders'; +import type { ChatModelCard } from '@/types/llm'; + import { AgentRuntimeErrorType } from '../error'; import { o1Models, pruneO1Payload } from '../openai'; import { ModelProvider } from '../types'; -import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory'; +import { + CHAT_MODELS_BLOCK_LIST, + LobeOpenAICompatibleFactory, +} from '../utils/openaiCompatibleFactory'; + +enum Task { + 'chat-completion', + 'embeddings', +} + +/* eslint-disable typescript-sort-keys/interface */ +type Model = { + id: string; + name: string; + friendly_name: string; + model_version: number; + publisher: string; + model_family: string; + model_registry: string; + license: string; + task: Task; + description: string; + summary: string; + tags: string[]; +}; +/* eslint-enable typescript-sort-keys/interface */ export const LobeGithubAI = LobeOpenAICompatibleFactory({ baseURL: 'https://models.inference.ai.azure.com', @@ -23,5 +51,27 @@ export const LobeGithubAI = LobeOpenAICompatibleFactory({ bizError: 
AgentRuntimeErrorType.ProviderBizError, invalidAPIKey: AgentRuntimeErrorType.InvalidGithubToken, }, + models: async ({ client }) => { + const modelsPage = (await client.models.list()) as any; + const modelList: Model[] = modelsPage.body; + return modelList + .filter((model) => { + return CHAT_MODELS_BLOCK_LIST.every( + (keyword) => !model.name.toLowerCase().includes(keyword), + ); + }) + .map((model) => { + const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => m.id === model.name); + + if (knownModel) return knownModel; + + return { + description: model.description, + displayName: model.friendly_name, + id: model.name, + }; + }) + .filter(Boolean) as ChatModelCard[]; + }, provider: ModelProvider.Github, }); diff --git a/src/libs/agent-runtime/togetherai/index.ts b/src/libs/agent-runtime/togetherai/index.ts index b291aa5faad7..73b06cbcd314 100644 --- a/src/libs/agent-runtime/togetherai/index.ts +++ b/src/libs/agent-runtime/togetherai/index.ts @@ -16,7 +16,8 @@ export const LobeTogetherAI = LobeOpenAICompatibleFactory({ debug: { chatCompletion: () => process.env.DEBUG_TOGETHERAI_CHAT_COMPLETION === '1', }, - models: async ({ apiKey }) => { + models: async ({ client }) => { + const apiKey = client.apiKey; const data = await fetch(`${baseURL}/api/models`, { headers: { Authorization: `Bearer ${apiKey}`, diff --git a/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts b/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts index ee2990682ecf..df80bf4c4a3b 100644 --- a/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +++ b/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts @@ -2,17 +2,18 @@ import OpenAI, { ClientOptions } from 'openai'; import { Stream } from 'openai/streaming'; import { LOBE_DEFAULT_MODEL_LIST } from '@/config/modelProviders'; -import { ChatModelCard } from '@/types/llm'; +import type { ChatModelCard } from '@/types/llm'; import { LobeRuntimeAI } from '../../BaseAI'; import { AgentRuntimeErrorType, ILobeAgentRuntimeErrorType } from '../../error'; -import { +import type { ChatCompetitionOptions, ChatCompletionErrorPayload, ChatStreamPayload, Embeddings, EmbeddingsOptions, EmbeddingsPayload, + ModelProvider, TextToImagePayload, TextToSpeechOptions, TextToSpeechPayload, @@ -26,7 +27,7 @@ import { StreamingResponse } from '../response'; import { OpenAIStream, OpenAIStreamOptions } from '../streams'; // the model contains the following keywords is not a chat model, so we should filter them out -const CHAT_MODELS_BLOCK_LIST = [ +export const CHAT_MODELS_BLOCK_LIST = [ 'embedding', 'davinci', 'curie', @@ -77,7 +78,7 @@ interface OpenAICompatibleFactoryOptions = any> { invalidAPIKey: ILobeAgentRuntimeErrorType; }; models?: - | ((params: { apiKey: string }) => Promise) + | ((params: { client: OpenAI }) => Promise) | { transformModel?: (model: OpenAI.Model) => ChatModelCard; }; @@ -157,7 +158,7 @@ export const LobeOpenAICompatibleFactory = = any> client!: OpenAI; baseURL!: string; - private _options: ConstructorOptions; + protected _options: ConstructorOptions; constructor(options: ClientOptions & Record = {}) { const _options = { @@ -249,7 +250,7 @@ export const LobeOpenAICompatibleFactory = = any> } async models() { - if (typeof models === 'function') return models({ apiKey: this.client.apiKey }); + if (typeof models === 'function') return models({ client: this.client }); const list = await this.client.models.list(); @@ -312,7 +313,7 @@ export const LobeOpenAICompatibleFactory = = any> } } - private handleError(error: any): 
ChatCompletionErrorPayload { + protected handleError(error: any): ChatCompletionErrorPayload { let desensitizedEndpoint = this.baseURL; // refs: https://github.com/lobehub/lobe-chat/issues/842 @@ -337,7 +338,7 @@ export const LobeOpenAICompatibleFactory = = any> endpoint: desensitizedEndpoint, error: error as any, errorType: ErrorType.invalidAPIKey, - provider: provider as any, + provider: provider as ModelProvider, }); } @@ -353,7 +354,7 @@ export const LobeOpenAICompatibleFactory = = any> endpoint: desensitizedEndpoint, error: errorResult, errorType: RuntimeError || ErrorType.bizError, - provider: provider as any, + provider: provider as ModelProvider, }); } }; From 01545060cecd83d368e183832c9131ea4e3fedaf Mon Sep 17 00:00:00 2001 From: semantic-release-bot Date: Wed, 18 Dec 2024 04:32:58 +0000 Subject: [PATCH 2/3] :bookmark: chore(release): v1.36.33 [skip ci] MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### [Version 1.36.33](https://github.com/lobehub/lobe-chat/compare/v1.36.32...v1.36.33) Released on **2024-12-18** #### 🐛 Bug Fixes - **misc**: Fix GitHub model fetch.
<details>
<summary><kbd>Improvements and Fixes</kbd></summary>

#### What's fixed

* **misc**: Fix GitHub model fetch, closes [#4645](https://github.com/lobehub/lobe-chat/issues/4645) ([b69dce3](https://github.com/lobehub/lobe-chat/commit/b69dce3))

</details>
[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
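The underlying fix lands in patch 1/3: GitHub's models endpoint returns its list under a non-OpenAI `body` field rather than the usual `data` page, so `LobeGithubAI` now ships a provider-specific `models` callback. A minimal usage sketch, assuming a valid GitHub token (the value below is a placeholder):

```ts
import { LobeGithubAI } from '@/libs/agent-runtime/github';

// Placeholder token; a real GitHub PAT with model access is required.
const runtime = new LobeGithubAI({ apiKey: 'ghp_xxx' });

// The callback reads `modelsPage.body`, drops non-chat entries via
// CHAT_MODELS_BLOCK_LIST, prefers a matching LOBE_DEFAULT_MODEL_LIST card
// when one exists, and otherwise maps each entry to
// { id: name, displayName: friendly_name, description }.
const models = await runtime.models();
```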
--- CHANGELOG.md | 25 +++++++++++++++++++++++++ package.json | 2 +- 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4d168b97a371..5ef5f8afd653 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,31 @@ # Changelog +### [Version 1.36.33](https://github.com/lobehub/lobe-chat/compare/v1.36.32...v1.36.33) + +Released on **2024-12-18** + +#### 🐛 Bug Fixes + +- **misc**: Fix GitHub model fetch. + +
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **misc**: Fix GitHub model fetch, closes [#4645](https://github.com/lobehub/lobe-chat/issues/4645) ([b69dce3](https://github.com/lobehub/lobe-chat/commit/b69dce3))
+
+</details>
+
+<br/>
+
+[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+<br/>
+ ### [Version 1.36.32](https://github.com/lobehub/lobe-chat/compare/v1.36.31...v1.36.32) Released on **2024-12-17** diff --git a/package.json b/package.json index d16eb9c0865c..80085036174d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@lobehub/chat", - "version": "1.36.32", + "version": "1.36.33", "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.", "keywords": [ "framework", From 17778160ccaab792bb9a96366ae996632e6a8f2a Mon Sep 17 00:00:00 2001 From: lobehubbot Date: Wed, 18 Dec 2024 04:33:54 +0000 Subject: [PATCH 3/3] =?UTF-8?q?=F0=9F=93=9D=20docs(bot):=20Auto=20sync=20a?= =?UTF-8?q?gents=20&=20plugin=20to=20readme?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- changelog/v1.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/changelog/v1.json b/changelog/v1.json index a5f072523ca4..18f5bd33fd2b 100644 --- a/changelog/v1.json +++ b/changelog/v1.json @@ -1,4 +1,11 @@ [ + { + "children": { + "fixes": ["Fix GitHub model fetch."] + }, + "date": "2024-12-18", + "version": "1.36.33" + }, { "children": { "improvements": ["Refactor the drizzle code style."]
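Across the series, the `models` option of `LobeOpenAICompatibleFactory` changes from receiving `{ apiKey }` to `{ client }`, the configured OpenAI SDK instance, as the TogetherAI and GitHub runtimes above show. A minimal sketch of a hypothetical downstream provider adopting the new signature (the base URL, mapping, and provider value are illustrative, not part of this patch):

```ts
import { ModelProvider } from '../types';
import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';

// Hypothetical provider; `api.example.com` is a stand-in endpoint.
export const LobeExampleAI = LobeOpenAICompatibleFactory({
  baseURL: 'https://api.example.com/v1',
  // The callback now receives the full OpenAI client, so it can reuse the
  // configured credentials (`client.apiKey`, as TogetherAI does) or call the
  // SDK directly (`client.models.list()`, as GitHub does).
  models: async ({ client }) => {
    const list = await client.models.list();
    return list.data.map((model) => ({ id: model.id }));
  },
  provider: ModelProvider.OpenAI, // placeholder; a real provider registers its own enum value
});
```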