From ce43c46ebb75160e8db7be60b395919473cd3a7d Mon Sep 17 00:00:00 2001
From: jonghwan
Date: Tue, 17 Dec 2024 21:24:34 +0900
Subject: [PATCH] [BLCKCHN-213] Add Vertex AI support

Add support for Vertex AI in the chatbot and service layers

- Introduced Vertex AI as a new LLM provider option (LLMProvider.VertexAI).
- Updated the environment variable checks to include GOOGLE_PROJECT_ID.
- Enhanced the .gitignore files to exclude additional file types.
- Added a VertexAIService class for handling interactions with Vertex AI.
- Updated the LLM interfaces and factory to accommodate Vertex AI configurations.
- Implemented response generation for Vertex AI and updated the existing services to implement the new generateResponse interface method.
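A minimal usage sketch of the new factory path, for reviewers (illustrative
only: the project id and chainId are placeholder values, and the Options
object is trimmed to the fields this patch touches):

    import { createLLMService } from './services/llm/llm.factory.js';
    import { LLMProvider, Role } from './services/agent/agent.interfaces.js';

    // Select Vertex AI; createLLMService throws if projectId is missing.
    const service = createLLMService({
      llmProvider: LLMProvider.VertexAI,
      vertexAI: {
        projectId: 'my-gcp-project', // placeholder, normally GOOGLE_PROJECT_ID
        location: 'us-central1',
        model: 'gemini-1.0-pro',
      },
      chainId: 25, // placeholder chain id
      context: [],
    });

    // generateResponse() maps the context onto Vertex AI chat turns and
    // returns either plain text or a tool_calls entry for blockchain functions.
    const reply = await service.generateResponse([
      { role: Role.User, content: 'What is the latest block height?' },
    ]);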
---
 .gitignore                                    |   1 +
 ai/cryptocom-ai-agent-pychatbot/chat.py       |  24 +-
 ai/cryptocom-ai-agent-service/.gitignore      |   5 +
 ai/cryptocom-ai-agent-service/package.json    |   4 +-
 .../src/services/agent/agent.interfaces.ts    |   8 +
 .../src/services/agent/agent.service.ts       |   6 +
 .../src/services/llm/gemini.service.ts        |   9 +
 .../src/services/llm/llm.factory.ts           |  40 ++++
 .../src/services/llm/llm.interface.ts         |  15 +-
 .../src/services/llm/openai.service.ts        |   5 +
 .../src/services/llm/vertexai.service.ts      | 209 ++++++++++++++++++
 11 files changed, 315 insertions(+), 11 deletions(-)
 create mode 100644 .gitignore
 create mode 100644 ai/cryptocom-ai-agent-service/src/services/llm/llm.factory.ts
 create mode 100644 ai/cryptocom-ai-agent-service/src/services/llm/vertexai.service.ts

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..dd44972
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+*.md
diff --git a/ai/cryptocom-ai-agent-pychatbot/chat.py b/ai/cryptocom-ai-agent-pychatbot/chat.py
index 21543fa..14c1a73 100644
--- a/ai/cryptocom-ai-agent-pychatbot/chat.py
+++ b/ai/cryptocom-ai-agent-pychatbot/chat.py
@@ -7,9 +7,10 @@
 
 load_dotenv()
 
-# Check both API keys after load_dotenv()
+# Check API keys after load_dotenv()
 api_key = os.getenv("OPENAI_API_KEY")
 google_api_key = os.getenv("GOOGLE_API_KEY")
+google_project_id = os.getenv("GOOGLE_PROJECT_ID")
 
 if not api_key:
     print("Error: OPENAI_API_KEY not found in environment variables.")
@@ -25,6 +26,13 @@
 else:
     print("✓ GOOGLE_API_KEY has been imported successfully")
 
+if not google_project_id:
+    print("Error: GOOGLE_PROJECT_ID not found in environment variables.")
+    print("Please make sure you have set the GOOGLE_PROJECT_ID in your .env file.")
+    exit(1)
+else:
+    print("✓ GOOGLE_PROJECT_ID has been imported successfully")
+
 print()  # Add blank line after checks
 
 
@@ -46,6 +54,12 @@ def send_query(query: str, context: list = None, provider: str = "gemini") -> dict:
     provider_options = {}
     if provider == "openai":
         provider_options["openAI"] = {"apiKey": os.getenv("OPENAI_API_KEY")}
+    elif provider == "vertexai":
+        provider_options["vertexAI"] = {
+            "projectId": os.getenv("GOOGLE_PROJECT_ID"),
+            "location": "us-central1",
+            "model": "gemini-2.0-flash-exp",
+        }
     else:  # gemini
         provider_options["gemini"] = {"apiKey": os.getenv("GOOGLE_API_KEY")}
@@ -74,10 +88,12 @@ def main():
     # Ask for provider choice at startup - this will be fixed for the session
     while True:
-        provider = input("Choose your AI provider (openai/gemini): ").strip().lower()
-        if provider in ["openai", "gemini"]:
+        provider = (
+            input("Choose your AI provider (openai/gemini/vertexai): ").strip().lower()
+        )
+        if provider in ["openai", "gemini", "vertexai"]:
             break
-        print("Invalid choice. Please enter 'openai' or 'gemini'")
+        print("Invalid choice. Please enter 'openai', 'gemini', or 'vertexai'")
 
     print("\nType 'quit' to exit")
     print("Use up/down arrow keys to navigate command history")
diff --git a/ai/cryptocom-ai-agent-service/.gitignore b/ai/cryptocom-ai-agent-service/.gitignore
index d16575d..1679af9 100644
--- a/ai/cryptocom-ai-agent-service/.gitignore
+++ b/ai/cryptocom-ai-agent-service/.gitignore
@@ -119,4 +119,9 @@ dist/*
 node_modules/*
 client/*
 .env
+.env.example
 *.lock
+*.sh
+*.json
+*.md
+*.txt
diff --git a/ai/cryptocom-ai-agent-service/package.json b/ai/cryptocom-ai-agent-service/package.json
index cb1a6e8..a2ca2bf 100644
--- a/ai/cryptocom-ai-agent-service/package.json
+++ b/ai/cryptocom-ai-agent-service/package.json
@@ -27,7 +27,9 @@
     "openai": "4.63.0",
     "swagger-jsdoc": "^6.2.8",
     "swagger-ui-express": "5.0.1",
-    "winston": "3.14.2"
+    "winston": "3.14.2",
+    "@google-cloud/vertexai": "^0.5.0",
+    "@google/generative-ai": "^0.2.0"
   },
   "devDependencies": {
     "@eslint/js": "^9.11.0",
diff --git a/ai/cryptocom-ai-agent-service/src/services/agent/agent.interfaces.ts b/ai/cryptocom-ai-agent-service/src/services/agent/agent.interfaces.ts
index f9c2d51..51479da 100644
--- a/ai/cryptocom-ai-agent-service/src/services/agent/agent.interfaces.ts
+++ b/ai/cryptocom-ai-agent-service/src/services/agent/agent.interfaces.ts
@@ -1,6 +1,7 @@
 export enum LLMProvider {
   OpenAI = 'openai',
   Gemini = 'gemini',
+  VertexAI = 'vertexai',
 }
 
 export interface OpenAIOptions {
@@ -15,6 +16,7 @@ export interface ExplorerKeys {
 export interface Options {
   openAI?: OpenAIOptions;
   gemini?: GeminiOptions;
+  vertexAI?: VertexAIOptions;
   llmProvider?: LLMProvider;
   chainId: number;
   context: QueryContext[];
@@ -246,3 +248,9 @@ export interface GeminiOptions {
   apiKey: string;
   model?: string;
 }
+
+export interface VertexAIOptions {
+  projectId: string;
+  location?: string;
+  model?: string;
+}
diff --git a/ai/cryptocom-ai-agent-service/src/services/agent/agent.service.ts b/ai/cryptocom-ai-agent-service/src/services/agent/agent.service.ts
index 45f3d2d..c096357 100644
--- a/ai/cryptocom-ai-agent-service/src/services/agent/agent.service.ts
+++ b/ai/cryptocom-ai-agent-service/src/services/agent/agent.service.ts
@@ -24,6 +24,7 @@ import {
 import { LLMService } from '../llm/llm.interface.js';
 import { OpenAIService } from '../llm/openai.service.js';
 import { GeminiService } from '../llm/gemini.service.js';
+import { VertexAIService } from '../llm/vertexai.service.js';
 
 /**
  * Initialize Developer Platform SDK
@@ -133,6 +134,11 @@
           throw new Error('Gemini configuration is required when using Gemini provider');
         }
         return new GeminiService(this.options.gemini);
+      case LLMProvider.VertexAI:
+        if (!this.options.vertexAI) {
+          throw new Error('Vertex AI configuration is required when using Vertex AI provider');
+        }
+        return new VertexAIService(this.options.vertexAI);
       default:
         throw new Error(`Unsupported LLM provider: ${provider}`);
     }
diff --git a/ai/cryptocom-ai-agent-service/src/services/llm/gemini.service.ts b/ai/cryptocom-ai-agent-service/src/services/llm/gemini.service.ts
index 76adf49..3e6ec40 100644
--- a/ai/cryptocom-ai-agent-service/src/services/llm/gemini.service.ts
+++ b/ai/cryptocom-ai-agent-service/src/services/llm/gemini.service.ts
@@ -38,6 +38,9 @@ export class GeminiService implements LLMService {
   private lastAssistantMessage: AIMessageResponse | null = null;
 
   constructor(config: LLMConfig) {
+    if (!config.apiKey) {
+      throw new Error('Gemini API key is required');
+    }
     this.apiKey = config.apiKey;
     this.model = config.model || 'gemini-1.5-pro';
     this.baseUrl = 'https://generativelanguage.googleapis.com/v1beta/models';
@@ -188,4 +191,10 @@ export class GeminiService implements LLMService {
       throw new Error(`Gemini API error: ${error instanceof Error ? error.message : 'Unknown error'}`);
     }
   }
+
+  async generateResponse(context: QueryContext[]): Promise<AIMessageResponse> {
+    // Convert existing interpretUserQuery implementation to match new interface
+    const lastMessage = context[context.length - 1];
+    return this.interpretUserQuery(lastMessage.content, context);
+  }
 }
diff --git a/ai/cryptocom-ai-agent-service/src/services/llm/llm.factory.ts b/ai/cryptocom-ai-agent-service/src/services/llm/llm.factory.ts
new file mode 100644
index 0000000..b75f612
--- /dev/null
+++ b/ai/cryptocom-ai-agent-service/src/services/llm/llm.factory.ts
@@ -0,0 +1,40 @@
+import { LLMProvider, Options } from '../agent/agent.interfaces.js';
+import { OpenAIService } from './openai.service.js';
+import { GeminiService } from './gemini.service.js';
+import { VertexAIService } from './vertexai.service.js';
+import { LLMService } from './llm.interface.js';
+
+export function createLLMService(options: Options): LLMService {
+  switch (options.llmProvider) {
+    case LLMProvider.OpenAI:
+      if (!options.openAI?.apiKey) {
+        throw new Error('OpenAI API key is required');
+      }
+      return new OpenAIService({
+        apiKey: options.openAI.apiKey,
+        model: options.openAI.model,
+      });
+
+    case LLMProvider.Gemini:
+      if (!options.gemini?.apiKey) {
+        throw new Error('Gemini API key is required');
+      }
+      return new GeminiService({
+        apiKey: options.gemini.apiKey,
+        model: options.gemini.model,
+      });
+
+    case LLMProvider.VertexAI:
+      if (!options.vertexAI?.projectId) {
+        throw new Error('Vertex AI project ID is required');
+      }
+      return new VertexAIService({
+        projectId: options.vertexAI.projectId,
+        location: options.vertexAI.location,
+        model: options.vertexAI.model,
+      });
+
+    default:
+      throw new Error(`Unsupported LLM provider: ${options.llmProvider}`);
+  }
+}
diff --git a/ai/cryptocom-ai-agent-service/src/services/llm/llm.interface.ts b/ai/cryptocom-ai-agent-service/src/services/llm/llm.interface.ts
index cc0e50c..0da3095 100644
--- a/ai/cryptocom-ai-agent-service/src/services/llm/llm.interface.ts
+++ b/ai/cryptocom-ai-agent-service/src/services/llm/llm.interface.ts
@@ -1,6 +1,14 @@
-import { AIMessageResponse, FunctionCallResponse, QueryContext } from '../agent/agent.interfaces.js';
+import { AIMessageResponse, QueryContext, FunctionCallResponse } from '../agent/agent.interfaces.js';
+
+export interface LLMConfig {
+  apiKey?: string;
+  model?: string;
+  projectId?: string;
+  location?: string;
+}
 
 export interface LLMService {
+  generateResponse(context: QueryContext[]): Promise<AIMessageResponse>;
   interpretUserQuery(query: string, context: QueryContext[]): Promise<AIMessageResponse>;
   generateFinalResponse(
     query: string,
@@ -8,8 +16,3 @@ export interface LLMService {
     context: QueryContext[]
   ): Promise<string>;
 }
-
-export interface LLMConfig {
-  apiKey: string;
-  model?: string;
-}
diff --git a/ai/cryptocom-ai-agent-service/src/services/llm/openai.service.ts b/ai/cryptocom-ai-agent-service/src/services/llm/openai.service.ts
index 5ad5fd3..50ee697 100644
--- a/ai/cryptocom-ai-agent-service/src/services/llm/openai.service.ts
+++ b/ai/cryptocom-ai-agent-service/src/services/llm/openai.service.ts
@@ -113,4 +113,9 @@ export class OpenAIService implements LLMService {
       return 'Error generating final response';
     }
   }
+
+  async generateResponse(context: QueryContext[]): Promise<AIMessageResponse> {
+    const lastMessage = context[context.length - 1];
+    return this.interpretUserQuery(lastMessage.content, context);
+  }
 }
diff --git a/ai/cryptocom-ai-agent-service/src/services/llm/vertexai.service.ts b/ai/cryptocom-ai-agent-service/src/services/llm/vertexai.service.ts
new file mode 100644
index 0000000..e0c0500
--- /dev/null
+++ b/ai/cryptocom-ai-agent-service/src/services/llm/vertexai.service.ts
@@ -0,0 +1,209 @@
+import { VertexAI, GenerativeModel } from '@google-cloud/vertexai';
+import { LLMConfig, LLMService } from './llm.interface.js';
+import {
+  AIMessageResponse,
+  FunctionCallResponse,
+  QueryContext,
+  Role,
+  BlockchainFunction,
+} from '../agent/agent.interfaces.js';
+import { TOOLS } from '../agent/agent.constants.js';
+import { ChatCompletionTool } from 'openai/resources/index.js';
+import { Tool, FunctionDeclarationSchemaProperty } from '@google-cloud/vertexai/build/src/types/content';
+
+// Define Vertex AI specific types
+enum FunctionDeclarationSchemaType {
+  STRING = 'STRING',
+  NUMBER = 'NUMBER',
+  BOOLEAN = 'BOOLEAN',
+  OBJECT = 'OBJECT',
+}
+
+// Define interfaces for tool conversion
+interface ToolParameter {
+  type: string;
+  description: string;
+  enum?: string[];
+}
+
+export class VertexAIService implements LLMService {
+  private vertexai: VertexAI;
+  private model: GenerativeModel;
+  private projectId: string;
+  private location: string;
+  private modelName: string;
+
+  constructor(config: LLMConfig) {
+    if (!config.projectId) {
+      throw new Error('Project ID is required for Vertex AI');
+    }
+    this.projectId = config.projectId;
+    this.location = config.location || 'us-central1';
+    this.modelName = config.model || 'gemini-1.0-pro';
+
+    this.vertexai = new VertexAI({
+      project: this.projectId,
+      location: this.location,
+    });
+
+    this.model = this.vertexai.getGenerativeModel({
+      model: this.modelName,
+      generation_config: {
+        temperature: 0.1,
+        top_p: 0.95,
+        max_output_tokens: 8192,
+      },
+      tools: this.convertToolsToVertexAIFormat(TOOLS),
+    });
+  }
+
+  private convertToolsToVertexAIFormat(tools: ChatCompletionTool[]): Tool[] {
+    return [
+      {
+        function_declarations: tools.map((tool) => {
+          if (!tool.function?.parameters) {
+            throw new Error('Invalid tool format: missing parameters');
+          }
+
+          const required = Array.isArray(tool.function.parameters.required) ? tool.function.parameters.required : [];
+
+          return {
+            name: tool.function.name,
+            description: tool.function.description || '',
+            parameters: {
+              type: FunctionDeclarationSchemaType.OBJECT,
+              properties: this.convertParametersToVertexAIFormat(
+                tool.function.parameters.properties as Record<string, ToolParameter>
+              ),
+              required: required,
+            },
+          };
+        }),
+      },
+    ];
+  }
+
+  private convertParametersToVertexAIFormat(properties: Record<string, ToolParameter>): {
+    [k: string]: FunctionDeclarationSchemaProperty;
+  } {
+    const result: { [k: string]: FunctionDeclarationSchemaProperty } = {};
+
+    for (const [key, value] of Object.entries(properties)) {
+      const type = this.mapJsonSchemaTypeToVertexAI(value.type);
+
+      if (value.enum) {
+        result[key] = {
+          type,
+          description: value.description,
+          enum: value.enum,
+        };
+      } else {
+        result[key] = {
+          type,
+          description: value.description,
+        };
+      }
+    }
+
+    return result;
+  }
+
+  private mapJsonSchemaTypeToVertexAI(type: string): FunctionDeclarationSchemaType {
+    switch (type) {
+      case 'string':
+        return FunctionDeclarationSchemaType.STRING;
+      case 'number':
+        return FunctionDeclarationSchemaType.NUMBER;
+      case 'boolean':
+        return FunctionDeclarationSchemaType.BOOLEAN;
+      case 'object':
+        return FunctionDeclarationSchemaType.OBJECT;
+      default:
+        return FunctionDeclarationSchemaType.STRING;
+    }
+  }
+
+  async generateResponse(context: QueryContext[]): Promise<AIMessageResponse> {
+    try {
+      const messages = context.map((msg) => ({
+        role: msg.role === Role.System ? 'user' : msg.role.toLowerCase(),
+        parts: [{ text: msg.content }],
+      }));
+
+      // Add a more flexible system message for general queries
+      messages.unshift({
+        role: 'user',
+        parts: [
+          {
+            text: 'You are a helpful AI assistant with knowledge about blockchain and general topics. For blockchain operations, you can interact with Ethereum and Cronos chains.',
+          },
+        ],
+      });
+
+      const result = await this.model.generateContent({
+        contents: messages,
+        tools: this.convertToolsToVertexAIFormat(TOOLS),
+      });
+
+      const response = result.response;
+      const content = response.candidates[0]?.content;
+
+      if (!content) {
+        throw new Error('No content generated');
+      }
+
+      const part = content.parts[0];
+
+      // If no function call is made, it means it's a general question
+      // Return the text response directly
+      if (!part.functionCall) {
+        return {
+          content: part.text || '',
+          tool_calls: undefined,
+        };
+      }
+
+      // If there is a function call, return it
+      return {
+        content: '',
+        tool_calls: [
+          {
+            id: '1',
+            type: 'function',
+            function: {
+              name: part.functionCall.name as BlockchainFunction,
+              arguments: JSON.stringify(part.functionCall.args),
+            },
+          },
+        ],
+      };
+    } catch (error) {
+      console.error('Error generating response:', error);
+      throw error;
+    }
+  }
+
+  async interpretUserQuery(query: string, context: QueryContext[]): Promise<AIMessageResponse> {
+    const fullContext = [...context, { role: Role.User, content: query }];
+    return this.generateResponse(fullContext);
+  }
+
+  async generateFinalResponse(
+    query: string,
+    functionResponses: FunctionCallResponse[],
+    context: QueryContext[]
+  ): Promise<string> {
+    const functionResults = functionResponses
+      .map((response, index) => `Function ${index + 1} result: ${JSON.stringify(response)}`)
+      .join('\n');
+
+    const prompt = `
+      Original query: ${query}\n
+      Function results: ${functionResults}\n
+      Please provide a natural language response based on these results.
+    `;
+
+    const response = await this.interpretUserQuery(prompt, context);
+    return response.content;
+  }
+}