Serverreadssecrets #603

Closed · wants to merge 5 commits
28 changes: 20 additions & 8 deletions packages/cli/src/info.ts
@@ -1,11 +1,12 @@
 import { parseTokenFromEnv } from "../../core/src/connection"
 import { MODEL_PROVIDERS } from "../../core/src/constants"
 import { errorMessage } from "../../core/src/error"
-import { host } from "../../core/src/host"
+import { host, runtimeHost } from "../../core/src/host"
 import {
     ModelConnectionInfo,
     resolveModelConnectionInfo,
 } from "../../core/src/models"
+import { ServerEnvResponse } from "../../core/src/server/messages"
 import { CORE_VERSION } from "../../core/src/version"
 import { YAMLStringify } from "../../core/src/yaml"
 import { buildProject } from "./build"
@@ -19,29 +20,40 @@ export async function systemInfo() {
 }
 
 export async function envInfo(provider: string, options?: { token?: boolean }) {
+    const res = await resolveEnv(provider, options)
+    console.log(YAMLStringify(res))
+}
+
+export async function resolveEnv(
+    provider: string,
+    options?: { token?: boolean }
+): Promise<ServerEnvResponse> {
     const { token } = options || {}
-    const res: any = {}
-    res[".env"] = host.dotEnvPath ?? ""
-    res.providers = []
+    const res: ServerEnvResponse = {
+        ok: true,
+        env: host.dotEnvPath ?? "",
+        providers: [],
+    }
     const env = process.env
     for (const modelProvider of MODEL_PROVIDERS.filter(
         (mp) => !provider || mp.id === provider
     )) {
+        try {
             const conn = await parseTokenFromEnv(env, `${modelProvider.id}:*`)
             if (conn) {
-                if (!token && conn.token)
-                    conn.token = "***"
+                if (!token && conn.token) conn.token = "***"
                 res.providers.push(conn)
             }
+        } catch (e) {
+            res.providers.push({
+                provider: modelProvider.id,
+                model: undefined,
+                base: undefined,
+                error: errorMessage(e),
+            })
+        }
     }
-    console.log(YAMLStringify(res))
+    return res
 }
@@ -51,7 +63,7 @@ async function resolveScriptsConnectionInfo(
     const models: Record<string, ModelConnectionOptions> = {}
     for (const template of templates) {
         const conn: ModelConnectionOptions = {
-            model: template.model ?? host.defaultModelOptions.model,
+            model: template.model ?? runtimeHost.defaultModelOptions.model,
         }
         const key = JSON.stringify(conn)
         if (!models[key]) models[key] = conn
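
The ServerEnvResponse type imported above is defined in packages/core/src/server/messages.ts, which this diff does not show. Judging only from how resolveEnv populates it, a minimal sketch of its shape would be:

// Sketch only: inferred from resolveEnv above, not the actual definition in
// packages/core/src/server/messages.ts.
interface ServerEnvResponseSketch extends ResponseStatus {
    env: string // path of the .env file, or "" when none is set
    providers: (
        | LanguageModelConfiguration
        | { provider: string; model: undefined; base: undefined; error: string }
    )[]
}
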
14 changes: 9 additions & 5 deletions packages/cli/src/server.ts
@@ -18,7 +18,7 @@ import {
 import {
     LanguageModelConfiguration,
     ResponseStatus,
-    ServerResponse,
+    ServerVersionResponse,
     host,
     runtimeHost,
 } from "../../core/src/host"
@@ -33,8 +33,9 @@ import {
     ChatStart,
     ChatChunk,
     ChatCancel,
+    ServerEnvResponse,
 } from "../../core/src/server/messages"
-import { envInfo } from "./info"
+import { resolveEnv } from "./info"
 import { LanguageModel } from "../../core/src/chat"
 import {
     ChatCompletionResponse,
@@ -166,7 +167,7 @@ export async function startServer(options: { port: string }) {
                 switch (type) {
                     case "server.version": {
                         console.log(`server: version ${CORE_VERSION}`)
-                        response = <ServerResponse>{
+                        response = <ServerVersionResponse>{
                             ok: true,
                             version: CORE_VERSION,
                             node: process.version,
@@ -178,9 +179,12 @@ export async function startServer(options: { port: string }) {
                     }
                     case "server.env": {
                         console.log(`server: env`)
-                        envInfo(undefined)
-                        response = <ServerResponse>{
+                        const info = await resolveEnv(undefined, {
+                            token: false,
+                        })
+                        response = <ServerEnvResponse>{
                             ok: true,
+                            ...info,
                         }
                         break
                     }
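
With this change the server awaits resolveEnv and spreads the result into the response instead of only logging it, so a client actually receives the provider list. A hypothetical client round trip, assuming JSON over a WebSocket as the handler above suggests (the port and the message envelope are assumptions, not part of this diff):

import WebSocket from "ws"

// Hypothetical client; the port and envelope fields are assumed for illustration.
const ws = new WebSocket("ws://127.0.0.1:8003")
ws.once("open", () => ws.send(JSON.stringify({ type: "server.env" })))
ws.once("message", (data) => {
    const res = JSON.parse(data.toString())
    // tokens arrive masked as "***" since the server passes { token: false }
    console.log(res.providers)
    ws.close()
})
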
13 changes: 7 additions & 6 deletions packages/core/src/chat.ts
@@ -1,6 +1,6 @@
 import { MarkdownTrace } from "./trace"
 import { PromptImage, renderPromptNode } from "./promptdom"
-import { LanguageModelConfiguration, host } from "./host"
+import { LanguageModelConfiguration, host, runtimeHost } from "./host"
 import { GenerationOptions } from "./generation"
 import { JSON5TryParse, JSON5parse, isJSONObjectOrArray } from "./json5"
 import { CancellationToken, checkCancelled } from "./cancellation"
@@ -424,13 +424,14 @@ export function mergeGenerationOptions(
         model:
             runOptions?.model ??
             options?.model ??
-            host.defaultModelOptions.model,
+            runtimeHost.defaultModelOptions.model,
         temperature:
-            runOptions?.temperature ?? host.defaultModelOptions.temperature,
+            runOptions?.temperature ??
+            runtimeHost.defaultModelOptions.temperature,
         embeddingsModel:
             runOptions?.embeddingsModel ??
             options?.embeddingsModel ??
-            host.defaultEmbeddingsModelOptions.embeddingsModel,
+            runtimeHost.defaultEmbeddingsModelOptions.embeddingsModel,
     }
 }
 
@@ -447,8 +448,8 @@ export async function executeChatSession(
 ): Promise<RunPromptResult> {
     const {
         trace,
-        model = host.defaultModelOptions.model,
-        temperature = host.defaultModelOptions.temperature,
+        model = runtimeHost.defaultModelOptions.model,
+        temperature = runtimeHost.defaultModelOptions.temperature,
         topP,
         maxTokens,
         seed,
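
Worth noting: mergeGenerationOptions chains with ?? rather than ||, so an explicit temperature of 0 from the caller survives the merge instead of falling back to the runtime default. A minimal sketch of that behavior:

// ?? only falls through on null/undefined, so a caller-supplied 0 is preserved.
function pickTemperature(runTemperature?: number) {
    return runTemperature ?? runtimeHost.defaultModelOptions.temperature
}
pickTemperature(0) // === 0, not the runtime default
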
10 changes: 5 additions & 5 deletions packages/core/src/connection.ts
@@ -28,18 +28,18 @@ import {
     PLACEHOLDER_API_KEY,
 } from "./constants"
 import { fileExists, readText, tryReadText, writeText } from "./fs"
-import { APIType, host, LanguageModelConfiguration } from "./host"
+import { APIType, LanguageModelConfiguration, runtimeHost } from "./host"
 import { dedent } from "./indent"
 import { parseModelIdentifier } from "./models"
 import { normalizeFloat, trimTrailingSlash } from "./util"
 
 export async function parseDefaultsFromEnv(env: Record<string, string>) {
     if (env.GENAISCRIPT_DEFAULT_MODEL)
-        host.defaultModelOptions.model = env.GENAISCRIPT_DEFAULT_MODEL
+        runtimeHost.defaultModelOptions.model = env.GENAISCRIPT_DEFAULT_MODEL
     const t = normalizeFloat(env.GENAISCRIPT_DEFAULT_TEMPERATURE)
-    if (!isNaN(t)) host.defaultModelOptions.temperature = t
+    if (!isNaN(t)) runtimeHost.defaultModelOptions.temperature = t
     if (env.GENAISCRIPT_DEFAULT_EMBEDDINGS_MODEL)
-        host.defaultEmbeddingsModelOptions.embeddingsModel =
+        runtimeHost.defaultEmbeddingsModelOptions.embeddingsModel =
             env.GENAISCRIPT_DEFAULT_EMBEDDINGS_MODEL
 }
 
@@ -48,7 +48,7 @@ export async function parseTokenFromEnv(
     modelId: string
 ): Promise<LanguageModelConfiguration> {
     const { provider, model, tag } = parseModelIdentifier(
-        modelId ?? host.defaultModelOptions.model
+        modelId ?? runtimeHost.defaultModelOptions.model
     )
     if (provider === MODEL_PROVIDER_OPENAI) {
         if (env.OPENAI_API_KEY || env.OPENAI_API_BASE || env.OPENAI_API_TYPE) {
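
As a usage sketch, the defaults parsed here now land on runtimeHost rather than host. The model identifier below is an arbitrary example in the provider:model form that parseModelIdentifier expects; only the GENAISCRIPT_* variable names come from the diff:

// Hypothetical values for illustration.
await parseDefaultsFromEnv({
    GENAISCRIPT_DEFAULT_MODEL: "openai:gpt-4",
    GENAISCRIPT_DEFAULT_TEMPERATURE: "0.2",
})
// runtimeHost.defaultModelOptions.model === "openai:gpt-4"
// runtimeHost.defaultModelOptions.temperature === 0.2
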
1 change: 1 addition & 0 deletions packages/core/src/constants.ts
@@ -2,6 +2,7 @@ export const CHANGE = "change"
 export const TRACE_CHUNK = "traceChunk"
 export const RECONNECT = "reconnect"
 export const OPEN = "open"
+export const MESSAGE = "message"
 export const MAX_CACHED_TEMPERATURE = 0.5
 export const MAX_CACHED_TOP_P = 0.5
 export const MAX_TOOL_CALLS = 100
4 changes: 2 additions & 2 deletions packages/core/src/expander.ts
@@ -16,7 +16,7 @@ import { toChatCompletionUserMessage } from "./chat"
 import { importPrompt } from "./importprompt"
 import { parseModelIdentifier } from "./models"
 import { JSONSchemaStringifyToTypeScript } from "./schema"
-import { host } from "./host"
+import { host, runtimeHost } from "./host"
 import { resolveSystems } from "./systems"
 import { GenerationOptions, GenerationStatus } from "./generation"
 import {
@@ -175,7 +175,7 @@ export async function expandTemplate(
         options.temperature ??
         normalizeFloat(env.vars["temperature"]) ??
         template.temperature ??
-        host.defaultModelOptions.temperature
+        runtimeHost.defaultModelOptions.temperature
     const topP =
         options.topP ?? normalizeFloat(env.vars["top_p"]) ?? template.topP
     const max_tokens =
8 changes: 4 additions & 4 deletions packages/core/src/github.ts
@@ -5,7 +5,7 @@ import {
     GITHUB_TOKEN,
 } from "./constants"
 import { createFetch } from "./fetch"
-import { host } from "./host"
+import { host, runtimeHost } from "./host"
 import { link, prettifyMarkdown } from "./markdown"
 import { logError, logVerbose, normalizeInt } from "./util"
 
@@ -70,7 +70,7 @@ export async function githubUpdatePullRequestDescription(
     assert(commentTag)
 
     if (!issue) return { updated: false, statusText: "missing issue number" }
-    const token = await host.readSecret(GITHUB_TOKEN)
+    const token = await runtimeHost.readSecret(GITHUB_TOKEN)
     if (!token) return { updated: false, statusText: "missing github token" }
 
     text = prettifyMarkdown(text)
@@ -169,7 +169,7 @@ export async function githubCreateIssueComment(
     const { apiUrl, repository, issue } = info
 
     if (!issue) return { created: false, statusText: "missing issue number" }
-    const token = await host.readSecret(GITHUB_TOKEN)
+    const token = await runtimeHost.readSecret(GITHUB_TOKEN)
     if (!token) return { created: false, statusText: "missing github token" }
 
     const fetch = await createFetch({ retryOn: [] })
@@ -313,7 +313,7 @@ export async function githubCreatePullRequestReviews(
         logError("missing commit sha")
         return false
     }
-    const token = await host.readSecret(GITHUB_TOKEN)
+    const token = await runtimeHost.readSecret(GITHUB_TOKEN)
     if (!token) {
         logError("missing github token")
         return false
18 changes: 9 additions & 9 deletions packages/core/src/host.ts
@@ -51,8 +51,7 @@ export interface ResponseStatus {
     status?: number
 }
 
-export interface RetrievalSearchOptions extends VectorSearchOptions {
-}
+export interface RetrievalSearchOptions extends VectorSearchOptions {}
 
 export interface RetrievalSearchResponse extends ResponseStatus {
     results: WorkspaceFileWithScore[]
@@ -70,7 +69,7 @@ export interface RetrievalService {
     ): Promise<RetrievalSearchResponse>
 }
 
-export interface ServerResponse extends ResponseStatus {
+export interface ServerVersionResponse extends ResponseStatus {
     version: string
     node: string
     platform: string
@@ -96,12 +95,6 @@ export interface Host {
     installFolder(): string
     resolvePath(...segments: string[]): string
 
-    // read a secret from the environment or a .env file
-    readSecret(name: string): Promise<string | undefined>
-    defaultModelOptions: Required<Pick<ModelOptions, "model" | "temperature">>
-    defaultEmbeddingsModelOptions: Required<
-        Pick<EmbeddingsModelOptions, "embeddingsModel">
-    >
     getLanguageModelConfiguration(
         modelId: string,
         options?: { token?: boolean } & AbortSignalOptions & TraceOptions
@@ -130,6 +123,13 @@ export interface RuntimeHost extends Host {
     models: ModelService
     workspace: Omit<WorkspaceFileSystem, "grep">
 
+    // read a secret from the environment or a .env file
+    readSecret(name: string): Promise<string | undefined>
+    defaultModelOptions: Required<Pick<ModelOptions, "model" | "temperature">>
+    defaultEmbeddingsModelOptions: Required<
+        Pick<EmbeddingsModelOptions, "embeddingsModel">
+    >
+
     // executes a process
     exec(
         containerId: string,
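
This hunk is the substance of the pull request: readSecret and the default model option fields move off the base Host interface onto RuntimeHost, so only code holding a RuntimeHost can read secrets or mutate defaults, which is why every call site above switches from host to runtimeHost. A compile-time sketch of the effect:

// Sketch: after this change, secret access type-checks only against RuntimeHost.
declare const host: Host
declare const runtimeHost: RuntimeHost

// host.readSecret("GITHUB_TOKEN")  // error: readSecret no longer exists on Host
const token = await runtimeHost.readSecret("GITHUB_TOKEN") // ok
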
4 changes: 2 additions & 2 deletions packages/core/src/models.ts
@@ -1,6 +1,6 @@
 import { MODEL_PROVIDER_LLAMAFILE, MODEL_PROVIDER_OPENAI } from "./constants"
 import { errorMessage } from "./error"
-import { LanguageModelConfiguration, host } from "./host"
+import { LanguageModelConfiguration, host, runtimeHost } from "./host"
 import { AbortSignalOptions, MarkdownTrace, TraceOptions } from "./trace"
 import { assert } from "./util"
 
@@ -86,7 +86,7 @@ export async function resolveModelConnectionInfo(
 }> {
     const { trace, token: askToken, signal } = options || {}
     const hasModel = options?.model ?? conn.model
-    const model = options?.model ?? conn.model ?? host.defaultModelOptions.model
+    const model = options?.model ?? conn.model ?? runtimeHost.defaultModelOptions.model
     try {
         const configuration = await host.getLanguageModelConfiguration(model, {
             token: askToken,
6 changes: 4 additions & 2 deletions packages/core/src/promptcontext.ts
@@ -175,7 +175,7 @@ export async function createPromptContext(
                 searchOptions.embeddingsModel =
                     searchOptions?.embeddingsModel ??
                     options?.embeddingsModel ??
-                    host.defaultEmbeddingsModelOptions.embeddingsModel
+                    runtimeHost.defaultEmbeddingsModelOptions.embeddingsModel
                 const key = await sha256string(
                     JSON.stringify({ files, searchOptions })
                 )
@@ -294,7 +294,9 @@ export async function createPromptContext(
             )
             if (!connection.configuration)
                 throw new Error("model connection error " + connection.info)
-            const { completer } = await resolveLanguageModel(connection.configuration.provider)
+            const { completer } = await resolveLanguageModel(
+                connection.configuration.provider
+            )
             if (!completer)
                 throw new Error(
                     "model driver not found for " + connection.info