Skip to content

Commit

Permalink
[BLCKCHN-213] add vertex ai
Browse files Browse the repository at this point in the history
Add support for Vertex AI in chatbot and service layers

- Introduced Vertex AI as a new provider option in the chatbot and service layers.
- Updated environment variable checks to include GOOGLE_PROJECT_ID.
- Enhanced .gitignore to exclude additional file types.
- Added VertexAIService class for handling interactions with Vertex AI.
- Updated LLM interfaces and factory to accommodate Vertex AI configurations.
- Implemented response generation methods for Vertex AI and updated existing services to support new functionality.
  • Loading branch information
leejw51crypto committed Dec 17, 2024
1 parent 0312ff0 commit ce43c46
Show file tree
Hide file tree
Showing 11 changed files with 315 additions and 11 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
*.md
24 changes: 20 additions & 4 deletions ai/cryptocom-ai-agent-pychatbot/chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,10 @@

load_dotenv()

# Check both API keys after load_dotenv()
# Check API keys after load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
google_api_key = os.getenv("GOOGLE_API_KEY")
google_project_id = os.getenv("GOOGLE_PROJECT_ID")

if not api_key:
print("Error: OPENAI_API_KEY not found in environment variables.")
Expand All @@ -25,6 +26,13 @@
else:
print("✓ GOOGLE_API_KEY has been imported successfully")

if not google_project_id:
print("Error: GOOGLE_PROJECT_ID not found in environment variables.")
print("Please make sure you have set the GOOGLE_PROJECT_ID in your .env file.")
exit(1)
else:
print("✓ GOOGLE_PROJECT_ID has been imported successfully")

print() # Add blank line after checks


Expand All @@ -46,6 +54,12 @@ def send_query(query: str, context: list = None, provider: str = "gemini") -> di
provider_options = {}
if provider == "openai":
provider_options["openAI"] = {"apiKey": os.getenv("OPENAI_API_KEY")}
elif provider == "vertexai":
provider_options["vertexAI"] = {
"projectId": os.getenv("GOOGLE_PROJECT_ID"),
"location": "us-central1",
"model": "gemini-2.0-flash-exp",
}
else: # gemini
provider_options["gemini"] = {"apiKey": os.getenv("GOOGLE_API_KEY")}

Expand Down Expand Up @@ -74,10 +88,12 @@ def main():

# Ask for provider choice at startup - this will be fixed for the session
while True:
provider = input("Choose your AI provider (openai/gemini): ").strip().lower()
if provider in ["openai", "gemini"]:
provider = (
input("Choose your AI provider (openai/gemini/vertexai): ").strip().lower()
)
if provider in ["openai", "gemini", "vertexai"]:
break
print("Invalid choice. Please enter 'openai' or 'gemini'")
print("Invalid choice. Please enter 'openai', 'gemini', or 'vertexai'")

print("\nType 'quit' to exit")
print("Use up/down arrow keys to navigate command history")
Expand Down
5 changes: 5 additions & 0 deletions ai/cryptocom-ai-agent-service/.gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -119,4 +119,9 @@ dist/*
node_modules/*
client/*
.env
.env.example
*.lock
*.sh
*.json
*.md
*.txt
4 changes: 3 additions & 1 deletion ai/cryptocom-ai-agent-service/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,9 @@
"openai": "4.63.0",
"swagger-jsdoc": "^6.2.8",
"swagger-ui-express": "5.0.1",
"winston": "3.14.2"
"winston": "3.14.2",
"@google-cloud/vertexai": "^0.5.0",
"@google/generative-ai": "^0.2.0"
},
"devDependencies": {
"@eslint/js": "^9.11.0",
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
/**
 * Identifiers for the supported LLM backends.
 * The string values are the provider names accepted from clients
 * (they match the keys sent by the chatbot's provider_options payload).
 */
export enum LLMProvider {
  OpenAI = 'openai',
  Gemini = 'gemini',
  VertexAI = 'vertexai',
}

export interface OpenAIOptions {
Expand All @@ -15,6 +16,7 @@ export interface ExplorerKeys {
export interface Options {
openAI?: OpenAIOptions;
gemini?: GeminiOptions;
vertexAI?: VertexAIOptions;
llmProvider?: LLMProvider;
chainId: number;
context: QueryContext[];
Expand Down Expand Up @@ -246,3 +248,9 @@ export interface GeminiOptions {
apiKey: string;
model?: string;
}

/**
 * Configuration for the Vertex AI provider.
 */
export interface VertexAIOptions {
  /** Google Cloud project ID (the chatbot sources this from GOOGLE_PROJECT_ID). */
  projectId: string;
  /** GCP region, e.g. 'us-central1' — presumably defaulted by VertexAIService when omitted; confirm. */
  location?: string;
  /** Model name, e.g. 'gemini-2.0-flash-exp' — presumably defaulted by VertexAIService when omitted; confirm. */
  model?: string;
}
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ import {
import { LLMService } from '../llm/llm.interface.js';
import { OpenAIService } from '../llm/openai.service.js';
import { GeminiService } from '../llm/gemini.service.js';
import { VertexAIService } from '../llm/vertexai.service.js';

/**
* Initialize Developer Platform SDK
Expand Down Expand Up @@ -133,6 +134,11 @@ export class AIAgentService {
throw new Error('Gemini configuration is required when using Gemini provider');
}
return new GeminiService(this.options.gemini);
case LLMProvider.VertexAI:
if (!this.options.vertexAI) {
throw new Error('Vertex AI configuration is required when using Vertex AI provider');
}
return new VertexAIService(this.options.vertexAI);
default:
throw new Error(`Unsupported LLM provider: ${provider}`);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,9 @@ export class GeminiService implements LLMService {
private lastAssistantMessage: AIMessageResponse | null = null;

constructor(config: LLMConfig) {
if (!config.apiKey) {
throw new Error('Gemini API key is required');
}
this.apiKey = config.apiKey;
this.model = config.model || 'gemini-1.5-pro';
this.baseUrl = 'https://generativelanguage.googleapis.com/v1beta/models';
Expand Down Expand Up @@ -188,4 +191,10 @@ export class GeminiService implements LLMService {
throw new Error(`Gemini API error: ${error instanceof Error ? error.message : 'Unknown error'}`);
}
}

async generateResponse(context: QueryContext[]): Promise<AIMessageResponse> {
// Convert existing interpretUserQuery implementation to match new interface
const lastMessage = context[context.length - 1];
return this.interpretUserQuery(lastMessage.content, context);
}
}
40 changes: 40 additions & 0 deletions ai/cryptocom-ai-agent-service/src/services/llm/llm.factory.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
import { LLMProvider, Options } from '../agent/agent.interfaces.js';
import { OpenAIService } from './openai.service.js';
import { GeminiService } from './gemini.service.js';
import { VertexAIService } from './vertexai.service.js';
import { LLMService } from './llm.interface.js';

export function createLLMService(options: Options): LLMService {
switch (options.llmProvider) {
case LLMProvider.OpenAI:
if (!options.openAI?.apiKey) {
throw new Error('OpenAI API key is required');
}
return new OpenAIService({
apiKey: options.openAI.apiKey,
model: options.openAI.model,
});

case LLMProvider.Gemini:
if (!options.gemini?.apiKey) {
throw new Error('Gemini API key is required');
}
return new GeminiService({
apiKey: options.gemini.apiKey,
model: options.gemini.model,
});

case LLMProvider.VertexAI:
if (!options.vertexAI?.projectId) {
throw new Error('Vertex AI project ID is required');
}
return new VertexAIService({
projectId: options.vertexAI.projectId,
location: options.vertexAI.location,
model: options.vertexAI.model,
});

default:
throw new Error(`Unsupported LLM provider: ${options.llmProvider}`);
}
}
15 changes: 9 additions & 6 deletions ai/cryptocom-ai-agent-service/src/services/llm/llm.interface.ts
Original file line number Diff line number Diff line change
@@ -1,15 +1,18 @@
import { AIMessageResponse, FunctionCallResponse, QueryContext } from '../agent/agent.interfaces.js';
import { AIMessageResponse, QueryContext, FunctionCallResponse } from '../agent/agent.interfaces.js';

/**
 * Union-of-all-providers config consumed by the concrete services.
 * NOTE(review): every field is optional, so misconfiguration (e.g. passing a
 * projectId to GeminiService) is only caught at runtime — a per-provider
 * discriminated union would catch it at compile time.
 */
export interface LLMConfig {
  /** API key for key-authenticated providers (OpenAI, Gemini). */
  apiKey?: string;
  /** Optional model override — e.g. GeminiService falls back to 'gemini-1.5-pro'. */
  model?: string;
  /** Google Cloud project ID — used by Vertex AI only. */
  projectId?: string;
  /** Google Cloud region — used by Vertex AI only. */
  location?: string;
}

/**
 * Common contract implemented by OpenAIService, GeminiService and
 * VertexAIService.
 */
export interface LLMService {
  /** Produce a reply from the full conversation context (newest message last). */
  generateResponse(context: QueryContext[]): Promise<AIMessageResponse>;
  /** Interpret a single user query against prior conversation context. */
  interpretUserQuery(query: string, context: QueryContext[]): Promise<AIMessageResponse>;
  /** Compose the final natural-language answer after function calls have run. */
  generateFinalResponse(
    query: string,
    functionResponses: FunctionCallResponse[],
    context: QueryContext[]
  ): Promise<string>;
}

export interface LLMConfig {
apiKey: string;
model?: string;
}
Original file line number Diff line number Diff line change
Expand Up @@ -113,4 +113,9 @@ export class OpenAIService implements LLMService {
return 'Error generating final response';
}
}

async generateResponse(context: QueryContext[]): Promise<AIMessageResponse> {
const lastMessage = context[context.length - 1];
return this.interpretUserQuery(lastMessage.content, context);
}
}
Loading

0 comments on commit ce43c46

Please sign in to comment.