diff --git a/src/assets/locale/da/common.json b/src/assets/locale/da/common.json
index 4d0de2dc..de82642e 100644
--- a/src/assets/locale/da/common.json
+++ b/src/assets/locale/da/common.json
@@ -70,6 +70,10 @@
"label": "Længden af Kontekst",
"placeholder": "Instast Længden af Kontekst værdien (standard: 2048)"
},
+ "numPredict": {
+ "label": "Maks Tokens (num_predict)",
+ "placeholder": "Indtast Maks Tokens værdi (fx. 2048, 4096)"
+ },
"seed": {
"label": "Seed",
"placeholder": "Indtast Seed værdi (fx. 1234)",
@@ -112,5 +116,6 @@
"older": "Ældre"
},
"pin": "Fastgør",
- "unpin": "Frigør"
+ "unpin": "Frigør",
+ "generationInfo": "Genererings Info"
}
\ No newline at end of file
diff --git a/src/assets/locale/da/openai.json b/src/assets/locale/da/openai.json
index 3410ae65..8ef3c07e 100644
--- a/src/assets/locale/da/openai.json
+++ b/src/assets/locale/da/openai.json
@@ -42,7 +42,7 @@
"delete": "Slet",
"edit": "Rediger",
"newModel": "Tilføj Modeller til Udbyder",
- "noNewModel": "For LMStudio henter vi dynamisk. Ingen manuel tilføjelse nødvendig.",
+ "noNewModel": "For LMStudio, Ollama, Llamafile, henter vi dynamisk. Ingen manuel tilføjelse nødvendig.",
"searchModel": "Søg Model",
"selectAll": "Vælg Alle",
"save": "Gem",
diff --git a/src/assets/locale/da/option.json b/src/assets/locale/da/option.json
index 4548680b..5fc02819 100644
--- a/src/assets/locale/da/option.json
+++ b/src/assets/locale/da/option.json
@@ -8,5 +8,6 @@
"somethingWentWrong": "Noget gik galt",
"validationSelectModel": "Venligst vælg en model for at forsæætte",
"deleteHistoryConfirmation": "Er du sikker på at du vil slette denne historik?",
- "editHistoryTitle": "Indtast en ny titel"
+ "editHistoryTitle": "Indtast en ny titel",
+ "temporaryChat": "Midlertidig Chat"
}
\ No newline at end of file
diff --git a/src/assets/locale/de/common.json b/src/assets/locale/de/common.json
index 739e0dd8..65b72abe 100644
--- a/src/assets/locale/de/common.json
+++ b/src/assets/locale/de/common.json
@@ -70,6 +70,10 @@
"label": "Anzahl der Kontexte",
"placeholder": "Geben Sie die Anzahl der Kontexte ein (Standard: 2048)"
},
+ "numPredict": {
+ "label": "Max Tokens (num_predict)",
+ "placeholder": "Geben Sie den Max-Tokens-Wert ein (z.B. 2048, 4096)"
+ },
"seed": {
"label": "Seed",
"placeholder": "Geben Sie den Seed-Wert ein (z.B. 1234)",
@@ -112,5 +116,6 @@
"older": "Älter"
},
"pin": "Anheften",
- "unpin": "Losheften"
+ "unpin": "Losheften",
+ "generationInfo": "Generierungsinformationen"
}
\ No newline at end of file
diff --git a/src/assets/locale/de/openai.json b/src/assets/locale/de/openai.json
index 23a684eb..98e75199 100644
--- a/src/assets/locale/de/openai.json
+++ b/src/assets/locale/de/openai.json
@@ -42,7 +42,7 @@
"delete": "Löschen",
"edit": "Bearbeiten",
"newModel": "Modelle zum Anbieter hinzufügen",
- "noNewModel": "Für LMStudio holen wir die Daten dynamisch. Keine manuelle Hinzufügung erforderlich.",
+ "noNewModel": "Für LMStudio, Ollama, Llamafile, holen wir die Daten dynamisch. Keine manuelle Hinzufügung erforderlich.",
"searchModel": "Modell suchen",
"selectAll": "Alle auswählen",
"save": "Speichern",
diff --git a/src/assets/locale/de/option.json b/src/assets/locale/de/option.json
index f6ec7f92..4303931a 100644
--- a/src/assets/locale/de/option.json
+++ b/src/assets/locale/de/option.json
@@ -8,5 +8,6 @@
"somethingWentWrong": "Etwas ist schiefgelaufen",
"validationSelectModel": "Bitte wähle ein Modell aus, um fortzufahren",
"deleteHistoryConfirmation": "Bist du sicher, dass du diesen Verlauf löschen möchtest?",
- "editHistoryTitle": "Gib einen neuen Titel ein"
+ "editHistoryTitle": "Gib einen neuen Titel ein",
+ "temporaryChat": "Temporärer Chat"
}
\ No newline at end of file
diff --git a/src/assets/locale/en/common.json b/src/assets/locale/en/common.json
index f5b9e398..d033144e 100644
--- a/src/assets/locale/en/common.json
+++ b/src/assets/locale/en/common.json
@@ -70,6 +70,10 @@
"label": "Number of Contexts",
"placeholder": "Enter Number of Contexts value (default: 2048)"
},
+ "numPredict": {
+ "label": "Max Tokens (num_predict)",
+ "placeholder": "Enter Max Tokens value (e.g. 2048, 4096)"
+ },
"seed": {
"label": "Seed",
"placeholder": "Enter Seed value (e.g. 1234)",
@@ -116,5 +120,6 @@
"older": "Older"
},
"pin": "Pin",
- "unpin": "Unpin"
+ "unpin": "Unpin",
+ "generationInfo": "Generation Info"
}
diff --git a/src/assets/locale/en/openai.json b/src/assets/locale/en/openai.json
index c0838cbf..f8ad2986 100644
--- a/src/assets/locale/en/openai.json
+++ b/src/assets/locale/en/openai.json
@@ -42,7 +42,7 @@
"delete": "Delete",
"edit": "Edit",
"newModel": "Add Models to Provider",
- "noNewModel": "For LMStudio, we fetch dynamically. No manual addition needed.",
+ "noNewModel": "For LMStudio, Ollama, Llamafile, we fetch dynamically. No manual addition needed.",
"searchModel": "Search Model",
"selectAll": "Select All",
"save": "Save",
diff --git a/src/assets/locale/en/option.json b/src/assets/locale/en/option.json
index 5739b40a..7d11938c 100644
--- a/src/assets/locale/en/option.json
+++ b/src/assets/locale/en/option.json
@@ -8,5 +8,6 @@
"somethingWentWrong": "Something went wrong",
"validationSelectModel": "Please select a model to continue",
"deleteHistoryConfirmation": "Are you sure you want to delete this history?",
- "editHistoryTitle": "Enter a new title"
+ "editHistoryTitle": "Enter a new title",
+ "temporaryChat": "Temporary Chat"
}
\ No newline at end of file
diff --git a/src/assets/locale/es/common.json b/src/assets/locale/es/common.json
index 65590aa8..210e0438 100644
--- a/src/assets/locale/es/common.json
+++ b/src/assets/locale/es/common.json
@@ -70,6 +70,10 @@
"label": "Cantidad de contextos",
"placeholder": "Ingresar el valor de tamaño de la ventana de contexto (por defecto: 2048)"
},
+ "numPredict": {
+ "label": "Máximo de Tokens (num_predict)",
+ "placeholder": "Ingrese el valor máximo de Tokens (ej: 2048, 4096)"
+ },
"seed": {
"label": "Semilla",
"placeholder": "Ingresar el valor de la semilla (ej: 1234)",
@@ -111,5 +115,6 @@
"older": "Más antiguo"
},
"pin": "Fijar",
- "unpin": "Desfijar"
+ "unpin": "Desfijar",
+ "generationInfo": "Información de Generación"
}
\ No newline at end of file
diff --git a/src/assets/locale/es/openai.json b/src/assets/locale/es/openai.json
index 2e84a196..b4bd9b28 100644
--- a/src/assets/locale/es/openai.json
+++ b/src/assets/locale/es/openai.json
@@ -42,7 +42,7 @@
"delete": "Eliminar",
"edit": "Editar",
"newModel": "Añadir Modelos al Proveedor",
- "noNewModel": "Para LMStudio, obtenemos dinámicamente. No se necesita adición manual.",
+ "noNewModel": "Para LMStudio, Ollama, Llamafile, obtenemos dinámicamente. No se necesita adición manual.",
"searchModel": "Buscar Modelo",
"selectAll": "Seleccionar Todo",
"save": "Guardar",
diff --git a/src/assets/locale/es/option.json b/src/assets/locale/es/option.json
index 3c48761c..d9ab8cfc 100644
--- a/src/assets/locale/es/option.json
+++ b/src/assets/locale/es/option.json
@@ -8,5 +8,6 @@
"somethingWentWrong": "Hubo un error",
"validationSelectModel": "Selecione un modelo para continuar",
"deleteHistoryConfirmation": "¿Esta seguro que quiere borrar éste histórico?",
- "editHistoryTitle": "Ingrese un nuevo título"
+ "editHistoryTitle": "Ingrese un nuevo título",
+ "temporaryChat": "Chat Temporal"
}
diff --git a/src/assets/locale/fa/common.json b/src/assets/locale/fa/common.json
index 4adb307b..72301c75 100644
--- a/src/assets/locale/fa/common.json
+++ b/src/assets/locale/fa/common.json
@@ -70,6 +70,10 @@
"label": "Number of Contexts",
"placeholder": "مقدار Number of Contexts را وارد کنید (پیش فرض: 2048)"
},
+ "numPredict": {
+ "label": "حداکثر توکنها (num_predict)",
+ "placeholder": "مقدار حداکثر توکنها را وارد کنید (مثلا 2048، 4096)"
+ },
"seed": {
"label": "Seed",
"placeholder": "مقدار Seed را وارد کنید (e.g. 1234)",
@@ -105,5 +109,6 @@
"older": "قدیمیتر"
},
"pin": "پین کردن",
- "unpin": "حذف پین"
+ "unpin": "حذف پین",
+ "generationInfo": "اطلاعات تولید"
}
\ No newline at end of file
diff --git a/src/assets/locale/fa/openai.json b/src/assets/locale/fa/openai.json
index 479e509c..7258e832 100644
--- a/src/assets/locale/fa/openai.json
+++ b/src/assets/locale/fa/openai.json
@@ -42,7 +42,7 @@
"delete": "حذف",
"edit": "ویرایش",
"newModel": "افزودن مدلها به ارائهدهنده",
- "noNewModel": "برای LMStudio، ما به صورت پویا دریافت میکنیم. نیازی به افزودن دستی نیست.",
+ "noNewModel": "برای LMStudio, Ollama, Llamafile, ما به صورت پویا دریافت میکنیم. نیازی به افزودن دستی نیست.",
"searchModel": "جستجوی مدل",
"selectAll": "انتخاب همه",
"save": "ذخیره",
diff --git a/src/assets/locale/fa/option.json b/src/assets/locale/fa/option.json
index 91887102..5278cd03 100644
--- a/src/assets/locale/fa/option.json
+++ b/src/assets/locale/fa/option.json
@@ -8,5 +8,6 @@
"somethingWentWrong": "مشکلی پیش آمد",
"validationSelectModel": "لطفا یک مدل را برای ادامه انتخاب کنید",
"deleteHistoryConfirmation": "آیا مطمئن هستید که می خواهید این تاریخچه را حذف کنید؟",
- "editHistoryTitle": "یک عنوان جدید وارد کنید"
+ "editHistoryTitle": "یک عنوان جدید وارد کنید",
+ "temporaryChat": "گپ موقت"
}
diff --git a/src/assets/locale/fr/common.json b/src/assets/locale/fr/common.json
index 8dbe6092..d11ef8c8 100644
--- a/src/assets/locale/fr/common.json
+++ b/src/assets/locale/fr/common.json
@@ -70,6 +70,10 @@
"label": "Nombre de contextes",
"placeholder": "Entrez la valeur du nombre de contextes (par défaut: 2048)"
},
+ "numPredict": {
+ "label": "Tokens maximum (num_predict)",
+ "placeholder": "Entrez la valeur des tokens maximum (par exemple 2048, 4096)"
+ },
"seed": {
"label": "Graine",
"placeholder": "Entrez la valeur des semences (par exemple 1234)",
@@ -111,5 +115,6 @@
"older": "Plus ancien"
},
"pin": "Épingler",
- "unpin": "Désépingler"
+ "unpin": "Désépingler",
+ "generationInfo": "Informations de génération"
}
\ No newline at end of file
diff --git a/src/assets/locale/fr/openai.json b/src/assets/locale/fr/openai.json
index a71256c9..2d9a0c29 100644
--- a/src/assets/locale/fr/openai.json
+++ b/src/assets/locale/fr/openai.json
@@ -42,7 +42,7 @@
"delete": "Supprimer",
"edit": "Modifier",
"newModel": "Ajouter des modèles au fournisseur",
- "noNewModel": "Pour LMStudio, nous récupérons dynamiquement. Pas besoin d'ajout manuel.",
+ "noNewModel": "Pour LMStudio, Ollama, Llamafile, nous récupérons dynamiquement. Pas besoin d'ajout manuel.",
"searchModel": "Rechercher un modèle",
"selectAll": "Tout sélectionner",
"save": "Enregistrer",
diff --git a/src/assets/locale/fr/option.json b/src/assets/locale/fr/option.json
index ec40c0e4..ad374aa0 100644
--- a/src/assets/locale/fr/option.json
+++ b/src/assets/locale/fr/option.json
@@ -8,5 +8,6 @@
"somethingWentWrong": "Quelque chose s'est mal passé",
"validationSelectModel": "Veuillez sélectionner un modèle pour continuer",
"deleteHistoryConfirmation": "Êtes-vous sûr de vouloir supprimer cette historique ?",
- "editHistoryTitle": "Entrez un nouveau titre"
+ "editHistoryTitle": "Entrez un nouveau titre",
+ "temporaryChat": "Chat temporaire"
}
\ No newline at end of file
diff --git a/src/assets/locale/it/common.json b/src/assets/locale/it/common.json
index 80ede680..6b085090 100644
--- a/src/assets/locale/it/common.json
+++ b/src/assets/locale/it/common.json
@@ -70,6 +70,10 @@
"label": "Dimensione del Contesto",
"placeholder": "Inserisci la Dimensione del Contesto (default: 2048)"
},
+ "numPredict": {
+ "label": "Token Massimi (num_predict)",
+ "placeholder": "Inserisci il valore dei Token Massimi (es. 2048, 4096)"
+ },
"seed": {
"label": "Seed",
"placeholder": "Inserisci il Valore Seed (e.g. 1234)",
@@ -111,5 +115,6 @@
"older": "Più Vecchi"
},
"pin": "Fissa",
- "unpin": "Rimuovi"
+ "unpin": "Rimuovi",
+ "generationInfo": "Informazioni sulla Generazione"
}
\ No newline at end of file
diff --git a/src/assets/locale/it/openai.json b/src/assets/locale/it/openai.json
index c9d27efa..589da515 100644
--- a/src/assets/locale/it/openai.json
+++ b/src/assets/locale/it/openai.json
@@ -42,7 +42,7 @@
"delete": "Elimina",
"edit": "Modifica",
"newModel": "Aggiungi Modelli al Provider",
- "noNewModel": "Per LMStudio, recuperiamo dinamicamente. Non è necessaria l'aggiunta manuale.",
+ "noNewModel": "Per LMStudio, Ollama, Llamafile, recuperiamo dinamicamente. Non è necessaria l'aggiunta manuale.",
"searchModel": "Cerca Modello",
"selectAll": "Seleziona Tutto",
"save": "Salva",
diff --git a/src/assets/locale/it/option.json b/src/assets/locale/it/option.json
index 6fcd098f..fb222c3d 100644
--- a/src/assets/locale/it/option.json
+++ b/src/assets/locale/it/option.json
@@ -8,5 +8,6 @@
"somethingWentWrong": "Qualcosa è andato storto",
"validationSelectModel": "Scegliere un modello per continuare",
"deleteHistoryConfirmation": "Sei sicuro che vuoi eliminare la cronologia?",
- "editHistoryTitle": "Inserisci un nuovo titolo"
+ "editHistoryTitle": "Inserisci un nuovo titolo",
+ "temporaryChat": "Chat Temporanea"
}
\ No newline at end of file
diff --git a/src/assets/locale/ja-JP/common.json b/src/assets/locale/ja-JP/common.json
index df20028c..92ba8e0f 100644
--- a/src/assets/locale/ja-JP/common.json
+++ b/src/assets/locale/ja-JP/common.json
@@ -70,6 +70,10 @@
"label": "コンテキストの数",
"placeholder": "コンテキスト数を入力してください(デフォルト:2048)"
},
+ "numPredict": {
+ "label": "最大トークン数 (num_predict)",
+ "placeholder": "最大トークン数を入力してください(例:2048、4096)"
+ },
"seed": {
"label": "シード",
"placeholder": "シード値を入力してください(例:1234)",
@@ -111,5 +115,6 @@
"older": "それ以前"
},
"pin": "固定",
- "unpin": "固定解除"
+ "unpin": "固定解除",
+ "generationInfo": "生成情報"
}
\ No newline at end of file
diff --git a/src/assets/locale/ja-JP/openai.json b/src/assets/locale/ja-JP/openai.json
index 829e8861..2e755e87 100644
--- a/src/assets/locale/ja-JP/openai.json
+++ b/src/assets/locale/ja-JP/openai.json
@@ -42,7 +42,7 @@
"delete": "削除",
"edit": "編集",
"newModel": "プロバイダーにモデルを追加",
- "noNewModel": "LMStudioの場合、動的に取得します。手動での追加は不要です。",
+ "noNewModel": "LMStudio, Ollama, Llamafile,の場合、動的に取得します。手動での追加は不要です。",
"searchModel": "モデルを検索",
"selectAll": "すべて選択",
"save": "保存",
diff --git a/src/assets/locale/ja-JP/option.json b/src/assets/locale/ja-JP/option.json
index 57b023bf..76dc343f 100644
--- a/src/assets/locale/ja-JP/option.json
+++ b/src/assets/locale/ja-JP/option.json
@@ -8,5 +8,6 @@
"somethingWentWrong": "何かが間違っています",
"validationSelectModel": "続行するにはモデルを選択してください",
"deleteHistoryConfirmation": "この履歴を削除しますか?",
- "editHistoryTitle": "新しいタイトルを入力"
+ "editHistoryTitle": "新しいタイトルを入力",
+ "temporaryChat": "一時的なチャット"
}
\ No newline at end of file
diff --git a/src/assets/locale/ko/chrome.json b/src/assets/locale/ko/chrome.json
new file mode 100644
index 00000000..8f89fc1a
--- /dev/null
+++ b/src/assets/locale/ko/chrome.json
@@ -0,0 +1,13 @@
+{
+ "heading": "Chrome AI 설정",
+ "status": {
+ "label": "Page Assist에서 Chrome AI 지원을 활성화하거나 비활성화하기"
+ },
+ "error": {
+ "browser_not_supported": "이 Chrome 버전은 Gemini Nano 모델을 지원하지 않습니다. 버전을 127 이상으로 업데이트해 주세요.",
+ "ai_not_supported": "설정 `chrome://flags/#prompt-api-for-gemini-nano`가 활성화되지 않았습니다. 활성화해 주세요.",
+ "ai_not_ready": "Gemini Nano가 아직 준비되지 않았습니다. Chrome 설정을 다시 확인해 주세요.",
+ "internal_error": "내부 오류가 발생했습니다. 나중에 다시 시도해 주세요."
+ },
+ "errorDescription": "Chrome AI를 사용하려면 현재 Dev 및 Canary 채널에 있는 127 이상의 브라우저 버전이 필요합니다. 지원되는 버전을 다운로드한 후, 다음 단계를 따르세요:\n\n1. `chrome://flags/#prompt-api-for-gemini-nano`에 접속하여 '활성화'를 선택합니다.\n2. `chrome://flags/#optimization-guide-on-device-model`에 접속하여 'EnabledBypassPrefRequirement'를 선택합니다.\n3. `chrome://components`에 접속하여 'Optimization Guide On Device Model'을 검색한 후 '업데이트 확인'을 클릭합니다. 이를 통해 모델이 다운로드됩니다. 설정이 표시되지 않는 경우, 단계 1과 2를 반복하고 브라우저를 재시작해 주세요."
+}
diff --git a/src/assets/locale/ko/common.json b/src/assets/locale/ko/common.json
new file mode 100644
index 00000000..ed9e0b98
--- /dev/null
+++ b/src/assets/locale/ko/common.json
@@ -0,0 +1,120 @@
+{
+ "pageAssist": "페이지 어시스트",
+ "selectAModel": "모델 선택",
+ "save": "저장",
+ "saved": "저장됨",
+ "cancel": "취소",
+ "retry": "재시도",
+ "share": {
+ "tooltip": {
+ "share": "공유"
+ },
+ "modal": {
+ "title": "채팅 링크 공유"
+ },
+ "form": {
+ "defaultValue": {
+ "name": "익명",
+ "title": "제목 없는 채팅"
+ },
+ "title": {
+ "label": "채팅 제목",
+ "placeholder": "채팅 제목을 입력하세요",
+ "required": "채팅 제목은 필수 항목입니다"
+ },
+ "name": {
+ "label": "이름",
+ "placeholder": "이름을 입력하세요",
+ "required": "이름은 필수 항목입니다"
+ },
+ "btn": {
+ "save": "링크 생성",
+ "saving": "링크 생성 중..."
+ }
+ },
+ "notification": {
+ "successGenerate": "링크가 클립보드에 복사되었습니다",
+ "failGenerate": "링크 생성에 실패했습니다"
+ }
+ },
+ "copyToClipboard": "클립보드에 복사",
+ "webSearch": "웹 검색 중",
+ "regenerate": "재생성",
+ "edit": "편집",
+ "delete": "삭제",
+ "saveAndSubmit": "저장하고 제출",
+ "editMessage": {
+ "placeholder": "메시지를 입력하세요..."
+ },
+ "submit": "제출",
+ "noData": "데이터가 없습니다",
+ "noHistory": "채팅 기록이 없습니다",
+ "chatWithCurrentPage": "현재 페이지에서 채팅",
+ "beta": "베타",
+ "tts": "TTS",
+ "currentChatModelSettings": "현재 채팅 모델 설정",
+ "modelSettings": {
+ "label": "모델 설정",
+ "description": "모든 채팅에 대해 글로벌 모델 옵션을 설정합니다",
+ "form": {
+ "keepAlive": {
+ "label": "Keep Alive",
+ "help": "요청 후 모델이 메모리에 유지되는 시간을 설정합니다 (기본값: 5분)",
+ "placeholder": "Keep Alive 기간을 입력하세요 (예: 5분, 10분, 1시간)"
+ },
+ "temperature": {
+ "label": "온도",
+ "placeholder": "온도 값을 입력하세요 (예: 0.7, 1.0)"
+ },
+ "numCtx": {
+ "label": "컨텍스트 수",
+ "placeholder": "컨텍스트 수를 입력하세요 (기본값: 2048)"
+ },
+ "numPredict": {
+ "label": "최대 토큰 수 (num_predict)",
+ "placeholder": "최대 토큰 수를 입력하세요 (예: 2048, 4096)"
+ }, "seed": {
+ "label": "시드",
+ "placeholder": "시드 값을 입력하세요 (예: 1234)",
+ "help": "모델 출력의 재현성"
+ },
+ "topK": {
+ "label": "Top K",
+ "placeholder": "Top K 값을 입력하세요 (예: 40, 100)"
+ },
+ "topP": {
+ "label": "Top P",
+ "placeholder": "Top P 값을 입력하세요 (예: 0.9, 0.95)"
+ },
+ "numGpu": {
+ "label": "GPU 수",
+ "placeholder": "GPU에 할당할 레이어 수를 입력하세요"
+ },
+ "systemPrompt": {
+ "label": "임시 시스템 프롬프트",
+ "placeholder": "시스템 프롬프트를 입력하세요",
+ "help": "현재 채팅에서 시스템 프롬프트를 빠르게 설정하는 방법이며, 선택된 시스템 프롬프트가 있을 경우 이를 덮어씁니다."
+ }
+ },
+ "advanced": "기타 모델 설정"
+ },
+ "copilot": {
+ "summary": "요약",
+ "explain": "설명",
+ "rephrase": "다르게 표현",
+ "translate": "번역"
+ },
+ "citations": "인용",
+ "downloadCode": "코드 다운로드",
+ "date": {
+ "pinned": "고정됨",
+ "today": "오늘",
+ "yesterday": "어제",
+ "last7Days": "지난 7일",
+ "older": "그 이전"
+ },
+ "pin": "고정",
+ "unpin": "고정 해제",
+ "generationInfo": "생성 정보"
+}
diff --git a/src/assets/locale/ko/knowledge.json b/src/assets/locale/ko/knowledge.json
new file mode 100644
index 00000000..1bf85366
--- /dev/null
+++ b/src/assets/locale/ko/knowledge.json
@@ -0,0 +1,40 @@
+{
+ "addBtn": "새 지식 추가",
+ "columns": {
+ "title": "제목",
+ "status": "상태",
+ "embeddings": "임베딩 모델",
+ "createdAt": "생성일",
+ "action": "작업"
+ },
+ "expandedColumns": {
+ "name": "이름"
+ },
+ "confirm": {
+ "delete": "이 지식을 삭제하시겠습니까?"
+ },
+ "deleteSuccess": "지식이 정상적으로 삭제되었습니다",
+ "status": {
+ "pending": "대기 중",
+ "finished": "완료",
+ "processing": "처리 중",
+ "failed": "실패"
+ },
+ "addKnowledge": "지식 추가",
+ "form": {
+ "title": {
+ "label": "지식 제목",
+ "placeholder": "지식 제목을 입력하세요",
+ "required": "지식 제목은 필수 항목입니다"
+ },
+ "uploadFile": {
+ "label": "파일 업로드",
+ "uploadText": "파일을 여기에 드래그 앤 드롭하거나 클릭하여 업로드하세요",
+ "uploadHint": "지원되는 파일 형식: .pdf, .csv, .txt",
+ "required": "파일은 필수 항목입니다"
+ },
+ "submit": "제출",
+ "success": "지식이 정상적으로 추가되었습니다"
+ },
+ "noEmbeddingModel": "먼저 RAG 설정 페이지에서 임베딩 모델을 추가해 주세요"
+}
diff --git a/src/assets/locale/ko/openai.json b/src/assets/locale/ko/openai.json
new file mode 100644
index 00000000..fae6aca9
--- /dev/null
+++ b/src/assets/locale/ko/openai.json
@@ -0,0 +1,90 @@
+{
+ "settings": "OpenAI 호환 API",
+ "heading": "OpenAI 호환 API",
+ "subheading": "여기에서 OpenAI API 호환 공급자를 관리하고 설정할 수 있습니다.",
+ "addBtn": "공급자 추가",
+ "table": {
+ "name": "공급자 이름",
+ "baseUrl": "기본 URL",
+ "actions": "작업"
+ },
+ "modal": {
+ "titleAdd": "새 공급자 추가",
+ "name": {
+ "label": "공급자 이름",
+ "required": "공급자 이름은 필수 항목입니다.",
+ "placeholder": "공급자 이름 입력"
+ },
+ "baseUrl": {
+ "label": "기본 URL",
+ "help": "OpenAI API 공급자의 기본 URL 예시: (http://localhost:1234/v1)",
+ "required": "기본 URL은 필수 항목입니다.",
+ "placeholder": "기본 URL 입력"
+ },
+ "apiKey": {
+ "label": "API 키",
+ "required": "API 키는 필수 항목입니다.",
+ "placeholder": "API 키 입력"
+ },
+ "submit": "저장",
+ "update": "업데이트",
+ "deleteConfirm": "이 공급자를 삭제하시겠습니까?",
+ "model": {
+ "title": "모델 목록",
+ "subheading": "이 공급자에서 사용하고자 하는 챗 모델을 선택하세요.",
+ "success": "새로운 모델이 정상적으로 추가되었습니다."
+ },
+ "tipLMStudio": "Page Assist는 LM Studio에 로드된 모델을 자동으로 가져옵니다. 수동 추가가 필요하지 않습니다."
+ },
+ "addSuccess": "공급자가 정상적으로 추가되었습니다.",
+ "deleteSuccess": "공급자가 정상적으로 삭제되었습니다.",
+ "updateSuccess": "공급자가 정상적으로 업데이트되었습니다.",
+ "delete": "삭제",
+ "edit": "편집",
+ "newModel": "공급자에 모델 추가",
+ "noNewModel": "LMStudio, Ollama, Llamafile,의 경우 동적으로 가져옵니다. 수동 추가는 필요하지 않습니다.",
+ "searchModel": "모델 검색",
+ "selectAll": "모두 선택",
+ "save": "저장",
+ "saving": "저장 중...",
+ "manageModels": {
+ "columns": {
+ "name": "모델 이름",
+ "model_type": "모델 타입",
+ "model_id": "모델 ID",
+ "provider": "공급자 이름",
+ "actions": "작업"
+ },
+ "tooltip": {
+ "delete": "삭제"
+ },
+ "confirm": {
+ "delete": "이 모델을 삭제하시겠습니까?"
+ },
+ "modal": {
+ "title": "사용자 정의 모델 추가",
+ "form": {
+ "name": {
+ "label": "모델 ID",
+ "placeholder": "llama3.2",
+ "required": "모델 ID는 필수 항목입니다."
+ },
+ "provider": {
+ "label": "공급자",
+ "placeholder": "공급자 선택",
+ "required": "공급자는 필수 항목입니다."
+ },
+ "type": {
+ "label": "모델 타입"
+ }
+ }
+ }
+ },
+ "noModelFound": "모델을 찾을 수 없습니다. 올바른 기본 URL과 API 키를 가진 공급자가 추가되었는지 확인하세요.",
+ "radio": {
+ "chat": "챗 모델",
+ "embedding": "임베딩 모델",
+ "chatInfo": "는 챗 완료 및 대화 생성에 사용됩니다",
+ "embeddingInfo": "는 RAG 및 기타 의미 검색 관련 작업에 사용됩니다."
+ }
+}
diff --git a/src/assets/locale/ko/option.json b/src/assets/locale/ko/option.json
new file mode 100644
index 00000000..2db30cc6
--- /dev/null
+++ b/src/assets/locale/ko/option.json
@@ -0,0 +1,13 @@
+{
+ "newChat": "새 채팅",
+ "selectAPrompt": "프롬프트 선택",
+ "githubRepository": "GitHub 리포지토리",
+ "settings": "설정",
+ "sidebarTitle": "채팅 기록",
+ "error": "오류",
+ "somethingWentWrong": "문제가 발생했습니다",
+ "validationSelectModel": "계속하려면 모델을 선택하세요",
+ "deleteHistoryConfirmation": "이 기록을 삭제하시겠습니까?",
+ "editHistoryTitle": "새 제목 입력",
+ "temporaryChat": "임시 채팅"
+}
diff --git a/src/assets/locale/ko/playground.json b/src/assets/locale/ko/playground.json
new file mode 100644
index 00000000..082ad5d2
--- /dev/null
+++ b/src/assets/locale/ko/playground.json
@@ -0,0 +1,29 @@
+{
+ "ollamaState": {
+ "searching": "Ollama 검색 중 🦙",
+ "running": "Ollama 실행 중 🦙",
+ "notRunning": "Ollama에 연결할 수 없습니다 🦙",
+ "connectionError": "연결 오류가 발생한 것 같습니다. 문제 해결에 대한 자세한 내용은 문서를 참조하세요."
+ },
+ "formError": {
+ "noModel": "모델을 선택하세요",
+ "noEmbeddingModel": "설정 > RAG 페이지에서 임베딩 모델을 설정하세요"
+ },
+ "form": {
+ "textarea": {
+ "placeholder": "메시지를 입력하세요..."
+ },
+ "webSearch": {
+ "on": "켜짐",
+ "off": "꺼짐"
+ }
+ },
+ "tooltip": {
+ "searchInternet": "인터넷 검색",
+ "speechToText": "음성 입력",
+ "uploadImage": "이미지 업로드",
+ "stopStreaming": "스트리밍 중지",
+ "knowledge": "지식"
+ },
+ "sendWhenEnter": "Enter 키를 누르면 전송"
+}
diff --git a/src/assets/locale/ko/settings.json b/src/assets/locale/ko/settings.json
new file mode 100644
index 00000000..072c7f6b
--- /dev/null
+++ b/src/assets/locale/ko/settings.json
@@ -0,0 +1,345 @@
+{
+ "generalSettings": {
+ "title": "일반 설정",
+ "settings": {
+ "heading": "웹 UI 설정",
+ "speechRecognitionLang": {
+ "label": "음성 인식 언어",
+ "placeholder": "언어 선택"
+ },
+ "language": {
+ "label": "언어",
+ "placeholder": "언어 선택"
+ },
+ "darkMode": {
+ "label": "테마 변경",
+ "options": {
+ "light": "라이트",
+ "dark": "다크"
+ }
+ },
+ "searchMode": {
+ "label": "간편 인터넷 검색 실행"
+ },
+ "copilotResumeLastChat": {
+ "label": "사이드 패널을 열 때 마지막 채팅 재개 (Copilot)"
+ },
+ "hideCurrentChatModelSettings": {
+ "label": "현재 채팅 모델 설정 숨기기"
+ },
+ "restoreLastChatModel": {
+ "label": "이전 채팅에서 마지막 사용한 모델 복원"
+ },
+ "sendNotificationAfterIndexing": {
+ "label": "지식 베이스 처리 완료 후 알림 전송"
+ },
+ "generateTitle": {
+ "label": "AI로 제목 생성"
+ }
+ },
+ "sidepanelRag": {
+ "heading": "웹사이트와의 Copilot 채팅 설정",
+ "ragEnabled": {
+ "label": "벡터 임베딩을 사용하여 웹사이트와 채팅"
+ },
+ "maxWebsiteContext": {
+ "label": "일반 모드 웹사이트 콘텐츠 크기",
+ "placeholder": "콘텐츠 크기 (기본값 4028)"
+ }
+ },
+ "webSearch": {
+ "heading": "웹 검색 관리",
+ "searchMode": {
+ "label": "간편한 인터넷 검색 실행"
+ },
+ "provider": {
+ "label": "검색 엔진",
+ "placeholder": "검색 엔진 선택"
+ },
+ "totalSearchResults": {
+ "label": "총 검색 결과",
+ "placeholder": "총 검색 결과 입력"
+ },
+ "visitSpecificWebsite": {
+ "label": "메시지에 언급된 웹사이트 방문"
+ }
+ },
+ "system": {
+ "heading": "시스템 설정",
+ "deleteChatHistory": {
+ "label": "채팅 기록 삭제",
+ "button": "삭제",
+ "confirm": "채팅 기록을 삭제하시겠습니까? 이 작업은 되돌릴 수 없습니다."
+ },
+ "export": {
+ "label": "채팅 기록, 지식 베이스, 프롬프트 내보내기",
+ "button": "데이터 내보내기",
+ "success": "내보내기 성공"
+ },
+ "import": {
+ "label": "채팅 기록, 지식 베이스, 프롬프트 가져오기",
+ "button": "데이터 가져오기",
+ "success": "가져오기 성공",
+ "error": "가져오기 오류"
+ }
+ },
+ "tts": {
+ "heading": "텍스트 음성 변환 설정",
+ "ttsEnabled": {
+ "label": "텍스트 음성 변환 활성화"
+ },
+ "ttsProvider": {
+ "label": "텍스트 음성 변환 제공자",
+ "placeholder": "제공자 선택"
+ },
+ "ttsVoice": {
+ "label": "텍스트 음성 변환 음성",
+ "placeholder": "음성 선택"
+ },
+ "ssmlEnabled": {
+ "label": "SSML (Speech Synthesis Markup Language) 활성화"
+ }
+ }
+ },
+ "manageModels": {
+ "title": "모델 관리",
+ "addBtn": "새 모델 추가",
+ "columns": {
+ "name": "이름",
+ "digest": "다이제스트",
+ "modifiedAt": "수정 일시",
+ "size": "크기",
+ "actions": "동작"
+ },
+ "expandedColumns": {
+ "parentModel": "상위 모델",
+ "format": "형식",
+ "family": "패밀리",
+ "parameterSize": "파라미터 크기",
+ "quantizationLevel": "양자화 수준"
+ },
+ "tooltip": {
+ "delete": "모델 삭제",
+ "repull": "모델 다시 가져오기"
+ },
+ "confirm": {
+ "delete": "이 모델을 정말 삭제하시겠습니까?",
+ "repull": "이 모델을 정말 다시 가져오시겠습니까?"
+ },
+ "modal": {
+ "title": "새 모델 추가",
+ "placeholder": "모델 이름 입력",
+ "pull": "모델 가져오기"
+ },
+ "notification": {
+ "pullModel": "모델 가져오는 중",
+ "pullModelDescription": "{{modelName}} 모델을 가져오는 중입니다. 자세한 내용은 확장 기능 아이콘을 확인하세요.",
+ "success": "성공",
+ "error": "오류",
+ "successDescription": "모델 가져오기가 완료되었습니다",
+ "successDeleteDescription": "모델 삭제가 완료되었습니다",
+ "someError": "문제가 발생했습니다. 나중에 다시 시도해 주세요."
+ }
+ },
+ "managePrompts": {
+ "title": "프롬프트 관리",
+ "addBtn": "새 프롬프트 추가",
+ "option1": "일반",
+ "option2": "RAG",
+ "questionPrompt": "질문 프롬프트",
+ "columns": {
+ "title": "제목",
+ "prompt": "프롬프트",
+ "type": "프롬프트 유형",
+ "actions": "동작"
+ },
+ "systemPrompt": "시스템 프롬프트",
+ "quickPrompt": "퀵 프롬프트",
+ "tooltip": {
+ "delete": "프롬프트 삭제",
+ "edit": "프롬프트 수정"
+ },
+ "confirm": {
+ "delete": "이 프롬프트를 정말 삭제하시겠습니까? 이 작업은 되돌릴 수 없습니다."
+ },
+ "modal": {
+ "addTitle": "새 프롬프트 추가",
+ "editTitle": "프롬프트 수정"
+ },
+ "segmented": {
+ "custom": "커스텀 프롬프트",
+ "copilot": "Copilot 프롬프트"
+ },
+ "form": {
+ "title": {
+ "label": "제목",
+ "placeholder": "훌륭한 프롬프트",
+ "required": "제목을 입력하세요"
+ },
+ "prompt": {
+ "label": "프롬프트",
+ "placeholder": "프롬프트 입력",
+ "required": "프롬프트를 입력하세요",
+ "help": "프롬프트 내에서 {key}를 변수로 사용할 수 있습니다.",
+ "missingTextPlaceholder": "프롬프트에 {text} 변수가 없습니다. 추가해 주세요."
+ },
+ "isSystem": {
+ "label": "시스템 프롬프트"
+ },
+ "btnSave": {
+ "saving": "프롬프트 추가 중...",
+ "save": "프롬프트 추가"
+ },
+ "btnEdit": {
+ "saving": "프롬프트 업데이트 중...",
+ "save": "프롬프트 업데이트"
+ }
+ },
+ "notification": {
+ "addSuccess": "프롬프트가 추가되었습니다",
+ "addSuccessDesc": "프롬프트가 정상적으로 추가되었습니다",
+ "error": "오류",
+ "someError": "문제가 발생했습니다. 나중에 다시 시도해 주세요.",
+ "updatedSuccess": "프롬프트가 업데이트되었습니다",
+ "updatedSuccessDesc": "프롬프트가 정상적으로 업데이트되었습니다",
+ "deletedSuccess": "프롬프트가 삭제되었습니다",
+ "deletedSuccessDesc": "프롬프트가 정상적으로 삭제되었습니다"
+ }
+ },
+ "manageShare": {
+ "title": "공유 관리",
+ "heading": "페이지 공유 URL 설정",
+ "form": {
+ "url": {
+ "label": "페이지 공유 URL",
+ "placeholder": "페이지 공유 URL 입력",
+ "required": "페이지 공유 URL을 입력해 주세요!",
+ "help": "개인정보 보호를 위해 페이지 공유를 자체 호스팅하고, 해당 URL을 여기에 입력할 수 있습니다. 자세히 보기"
+ }
+ },
+ "webshare": {
+ "heading": "웹 공유",
+ "columns": {
+ "title": "제목",
+ "url": "URL",
+ "actions": "동작"
+ },
+ "tooltip": {
+ "delete": "공유 삭제"
+ },
+ "confirm": {
+ "delete": "이 공유를 정말 삭제하시겠습니까? 이 작업은 되돌릴 수 없습니다."
+ },
+ "label": "페이지 공유 관리",
+ "description": "페이지 공유 기능을 활성화 또는 비활성화"
+ },
+ "notification": {
+ "pageShareSuccess": "페이지 공유 URL이 정상적으로 업데이트되었습니다",
+ "someError": "문제가 발생했습니다. 나중에 다시 시도해 주세요.",
+ "webShareDeleteSuccess": "웹 공유가 정상적으로 삭제되었습니다"
+ }
+ },
+ "ollamaSettings": {
+ "title": "Ollama 설정",
+ "heading": "Ollama 설정하기",
+ "settings": {
+ "ollamaUrl": {
+ "label": "Ollama URL",
+ "placeholder": "Ollama URL 입력"
+ },
+ "advanced": {
+ "label": "Ollama URL 고급 설정",
+ "urlRewriteEnabled": {
+ "label": "사용자 지정 Origin URL 활성화 또는 비활성화"
+ },
+ "rewriteUrl": {
+ "label": "사용자 지정 Origin URL",
+ "placeholder": "사용자 지정 Origin URL 입력"
+ },
+ "headers": {
+ "label": "사용자 지정 헤더",
+ "add": "헤더 추가",
+ "key": {
+ "label": "헤더 키",
+ "placeholder": "인증"
+ },
+ "value": {
+ "label": "헤더 값",
+ "placeholder": "베어러 토큰"
+ }
+ },
+ "help": "Page Assist에서 Ollama 연결에 문제가 있는 경우 사용자 지정 Origin URL을 설정할 수 있습니다. 설정에 대한 자세한 내용은 여기를 클릭하세요."
+ }
+ }
+ },
+ "manageSearch": {
+ "title": "웹 검색 관리",
+ "heading": "웹 검색 설정하기"
+ },
+ "about": {
+ "title": "소개",
+ "heading": "소개",
+ "chromeVersion": "Page Assist 버전",
+ "ollamaVersion": "Ollama 버전",
+ "support": "Page Assist 프로젝트는 다음 플랫폼에서 기부나 후원을 통해 지원할 수 있습니다:",
+ "koFi": "Ko-fi로 후원하기",
+ "githubSponsor": "GitHub에서 후원하기",
+ "githubRepo": "GitHub 저장소"
+ },
+ "manageKnowledge": {
+ "title": "지식 관리",
+ "heading": "지식 베이스 구성하기"
+ },
+ "rag": {
+ "title": "RAG 설정",
+ "ragSettings": {
+ "label": "RAG 설정",
+ "model": {
+ "label": "임베딩 모델",
+ "required": "모델을 선택해주세요",
+ "help": "`nomic-embed-text`와 같은 임베딩 모델 사용을 강력히 권장합니다.",
+ "placeholder": "모델 선택"
+ },
+ "chunkSize": {
+ "label": "청크 크기",
+ "placeholder": "청크 크기 입력",
+ "required": "청크 크기를 입력해주세요"
+ },
+ "chunkOverlap": {
+ "label": "청크 오버랩",
+ "placeholder": "청크 오버랩 입력",
+ "required": "청크 오버랩을 입력해주세요"
+ },
+ "totalFilePerKB": {
+ "label": "지식 베이스 기본 파일 업로드 제한",
+ "placeholder": "기본 파일 업로드 제한 입력 (예: 10)",
+ "required": "기본 파일 업로드 제한을 입력해주세요"
+ },
+ "noOfRetrievedDocs": {
+ "label": "검색 문서 수",
+ "placeholder": "검색 문서 수 입력",
+ "required": "검색 문서 수를 입력해주세요"
+ }
+ },
+ "prompt": {
+ "label": "RAG 프롬프트 설정",
+ "option1": "일반",
+ "option2": "웹",
+ "alert": "여기서 시스템 프롬프트를 설정하는 것은 더 이상 권장되지 않습니다. 프롬프트 추가 및 편집은 '프롬프트 관리' 섹션을 이용해주세요. 이 섹션은 향후 릴리스에서 제거될 예정입니다.",
+ "systemPrompt": "시스템 프롬프트",
+ "systemPromptPlaceholder": "시스템 프롬프트 입력",
+ "webSearchPrompt": "웹 검색 프롬프트",
+ "webSearchPromptHelp": "프롬프트에서 `{search_results}`를 제거하지 마세요.",
+ "webSearchPromptError": "웹 검색 프롬프트를 입력해주세요",
+ "webSearchPromptPlaceholder": "웹 검색 프롬프트 입력",
+ "webSearchFollowUpPrompt": "웹 검색 후속 프롬프트",
+ "webSearchFollowUpPromptHelp": "프롬프트에서 `{chat_history}`와 `{question}`를 제거하지 마세요.",
+ "webSearchFollowUpPromptError": "웹 검색 후속 프롬프트를 입력해주세요!",
+ "webSearchFollowUpPromptPlaceholder": "웹 검색 후속 프롬프트"
+ }
+ },
+ "chromeAiSettings": {
+ "title": "Chrome AI 설정"
+ }
+}
+
diff --git a/src/assets/locale/ko/sidepanel.json b/src/assets/locale/ko/sidepanel.json
new file mode 100644
index 00000000..b7a5eeab
--- /dev/null
+++ b/src/assets/locale/ko/sidepanel.json
@@ -0,0 +1,5 @@
+{
+ "tooltip": {
+ "embed": "페이지를 임베드하는 데 몇 분이 걸릴 수 있습니다. 잠시만 기다려 주세요..."
+ }
+}
diff --git a/src/assets/locale/ml/common.json b/src/assets/locale/ml/common.json
index bb1149aa..cd6868d8 100644
--- a/src/assets/locale/ml/common.json
+++ b/src/assets/locale/ml/common.json
@@ -69,6 +69,10 @@
"label": "സന്ദർഭങ്ങളുടെ എണ്ണം",
"placeholder": "സന്ദർഭങ്ങളുടെ സംഖ്യ നൽകുക (സ്ഥിരം: 2048)"
},
+ "numPredict": {
+ "label": "പരമാവധി ടോക്കണുകൾ (num_predict)",
+ "placeholder": "പരമാവധി ടോക്കൺ മൂല്യം നൽകുക (ഉദാ: 2048, 4096)"
+ },
"seed": {
"label": "സീഡ്",
"placeholder": "സീഡ് വില്യമ നൽകുക (ഉദാ: 1234)",
@@ -110,5 +114,6 @@
"older": "പഴയത്"
},
"pin": "പിൻ ചെയ്യുക",
- "unpin": "അൺപിൻ ചെയ്യുക"
+ "unpin": "അൺപിൻ ചെയ്യുക",
+ "generationInfo": "ജനറേഷൻ വിവരങ്ങൾ"
}
\ No newline at end of file
diff --git a/src/assets/locale/ml/openai.json b/src/assets/locale/ml/openai.json
index 6fe486e0..5bbaaeae 100644
--- a/src/assets/locale/ml/openai.json
+++ b/src/assets/locale/ml/openai.json
@@ -42,7 +42,7 @@
"delete": "ഇല്ലാതാക്കുക",
"edit": "തിരുത്തുക",
"newModel": "ദാതാവിലേക്ക് മോഡലുകൾ ചേർക്കുക",
- "noNewModel": "LMStudio-യ്ക്കായി, ഞങ്ങൾ ഡൈനാമിക്കായി ലഭ്യമാക്കുന്നു. മാനുവലായി ചേർക്കേണ്ടതില്ല.",
+ "noNewModel": "LMStudio, Ollama, Llamafile-യ്ക്കായി, ഞങ്ങൾ ഡൈനാമിക്കായി ലഭ്യമാക്കുന്നു. മാനുവലായി ചേർക്കേണ്ടതില്ല.",
"searchModel": "മോഡൽ തിരയുക",
"selectAll": "എല്ലാം തിരഞ്ഞെടുക്കുക",
"save": "സംരക്ഷിക്കുക",
diff --git a/src/assets/locale/ml/option.json b/src/assets/locale/ml/option.json
index f8fcf70e..1004b13f 100644
--- a/src/assets/locale/ml/option.json
+++ b/src/assets/locale/ml/option.json
@@ -8,5 +8,6 @@
"somethingWentWrong": "എന്തോ തെറ്റായി",
"deleteHistoryConfirmation": "നിങ്ങളുടെ ചാറ്റ് ചരിത്രം ഇല്ലാതാക്കണമെന്ന് തീർച്ചയാണോ?",
"editHistoryTitle": "ചാറ്റ് title എഡിറ്റുചെയ്യുക",
- "validationSelectModel": "തുടരുന്നതിന് ഒരു മോഡല് തിരഞ്ഞെടുക്കുക"
+ "validationSelectModel": "തുടരുന്നതിന് ഒരു മോഡല് തിരഞ്ഞെടുക്കുക",
+ "temporaryChat": "താൽക്കാലിക ചാറ്റ്"
}
\ No newline at end of file
diff --git a/src/assets/locale/no/common.json b/src/assets/locale/no/common.json
index 6665280e..1720d1f7 100644
--- a/src/assets/locale/no/common.json
+++ b/src/assets/locale/no/common.json
@@ -70,6 +70,10 @@
"label": "Kontekstlengde",
"placeholder": "Skriv inn kontekstlengdeverdi (standard: 2048)"
},
+ "numPredict": {
+ "label": "Maks Tokens (num_predict)",
+ "placeholder": "Skriv inn Maks Tokens-verdi (f.eks. 2048, 4096)"
+ },
"seed": {
"label": "Seed",
"placeholder": "Skriv inn seedverdi (f.eks. 1234)",
@@ -112,5 +116,6 @@
"older": "Eldre"
},
"pin": "Fest",
- "unpin": "Løsne"
+ "unpin": "Løsne",
+ "generationInfo": "Generasjonsinformasjon"
}
\ No newline at end of file
diff --git a/src/assets/locale/no/openai.json b/src/assets/locale/no/openai.json
index 91ddc3d7..aa748bbc 100644
--- a/src/assets/locale/no/openai.json
+++ b/src/assets/locale/no/openai.json
@@ -42,7 +42,7 @@
"delete": "Slett",
"edit": "Rediger",
"newModel": "Legg til modeller for leverandør",
- "noNewModel": "For LMStudio henter vi dynamisk. Ingen manuell tillegging nødvendig.",
+ "noNewModel": "For LMStudio, Ollama, Llamafile, henter vi dynamisk. Ingen manuell tillegging nødvendig.",
"searchModel": "Søk etter modell",
"selectAll": "Velg alle",
"save": "Lagre",
diff --git a/src/assets/locale/no/option.json b/src/assets/locale/no/option.json
index 76d335e0..64adb7b4 100644
--- a/src/assets/locale/no/option.json
+++ b/src/assets/locale/no/option.json
@@ -8,5 +8,6 @@
"somethingWentWrong": "Noe gikk galt",
"validationSelectModel": "Vennligst velg en modell for å fortsette",
"deleteHistoryConfirmation": "Er du sikker på at du vil slette denne historikken?",
- "editHistoryTitle": "Skriv inn en ny tittel"
+ "editHistoryTitle": "Skriv inn en ny tittel",
+ "temporaryChat": "Midlertidig Chat"
}
diff --git a/src/assets/locale/pt-BR/common.json b/src/assets/locale/pt-BR/common.json
index ba4f2522..660514d2 100644
--- a/src/assets/locale/pt-BR/common.json
+++ b/src/assets/locale/pt-BR/common.json
@@ -70,6 +70,10 @@
"label": "Número de Contextos",
"placeholder": "Digite o valor do Número de Contextos (padrão: 2048)"
},
+ "numPredict": {
+ "label": "Máximo de Tokens (num_predict)",
+ "placeholder": "Digite o valor do Máximo de Tokens (ex: 2048, 4096)"
+ },
"seed": {
"label": "Semente",
"placeholder": "Digite o valor da Semente (ex: 1234)",
@@ -111,5 +115,6 @@
"older": "Mais Antigos"
},
"pin": "Fixar",
- "unpin": "Desafixar"
+ "unpin": "Desafixar",
+ "generationInfo": "Informações de Geração"
}
\ No newline at end of file
diff --git a/src/assets/locale/pt-BR/option.json b/src/assets/locale/pt-BR/option.json
index de29ff13..eeffe1cf 100644
--- a/src/assets/locale/pt-BR/option.json
+++ b/src/assets/locale/pt-BR/option.json
@@ -8,5 +8,6 @@
"somethingWentWrong": "Algo deu errado",
"validationSelectModel": "Por favor, selecione um modelo para continuar",
"deleteHistoryConfirmation": "Tem certeza de que deseja excluir este histórico?",
- "editHistoryTitle": "Digite um novo título"
+ "editHistoryTitle": "Digite um novo título",
+ "temporaryChat": "Chat Temporário"
}
\ No newline at end of file
diff --git a/src/assets/locale/ru/common.json b/src/assets/locale/ru/common.json
index b619def6..31291a06 100644
--- a/src/assets/locale/ru/common.json
+++ b/src/assets/locale/ru/common.json
@@ -70,6 +70,10 @@
"label": "Количество контекстов",
"placeholder": "Введите значение количества контекстов (по умолчанию: 2048)"
},
+ "numPredict": {
+ "label": "Максимальное количество токенов (num_predict)",
+ "placeholder": "Введите значение максимального количества токенов (например, 2048, 4096)"
+ },
"seed": {
"label": "Сид",
"placeholder": "Введите значение сида (например, 1234)",
@@ -111,5 +115,6 @@
"older": "Ранее"
},
"pin": "Закрепить",
- "unpin": "Открепить"
+ "unpin": "Открепить",
+ "generationInfo": "Информация о генерации"
}
\ No newline at end of file
diff --git a/src/assets/locale/ru/openai.json b/src/assets/locale/ru/openai.json
index 436c6762..2b63545e 100644
--- a/src/assets/locale/ru/openai.json
+++ b/src/assets/locale/ru/openai.json
@@ -42,7 +42,7 @@
"delete": "Удалить",
"edit": "Редактировать",
"newModel": "Добавить модели к провайдеру",
- "noNewModel": "Для LMStudio мы загружаем динамически. Ручное добавление не требуется.",
+ "noNewModel": "Для LMStudio, Ollama, Llamafile, мы загружаем динамически. Ручное добавление не требуется.",
"searchModel": "Поиск модели",
"selectAll": "Выбрать все",
"save": "Сохранить",
diff --git a/src/assets/locale/ru/option.json b/src/assets/locale/ru/option.json
index f15c1067..bd8f9877 100644
--- a/src/assets/locale/ru/option.json
+++ b/src/assets/locale/ru/option.json
@@ -8,5 +8,6 @@
"somethingWentWrong": "Что-то пошло не так",
"validationSelectModel": "Пожалуйста, выберите модель, чтобы продолжить",
"deleteHistoryConfirmation": "Вы уверены, что хотите удалить эту историю?",
- "editHistoryTitle": "Введите новое название"
+ "editHistoryTitle": "Введите новое название",
+ "temporaryChat": "Временный чат"
}
diff --git a/src/assets/locale/sv/common.json b/src/assets/locale/sv/common.json
index 31272676..4698dc7b 100644
--- a/src/assets/locale/sv/common.json
+++ b/src/assets/locale/sv/common.json
@@ -70,6 +70,10 @@
"label": "Antal kontexter",
"placeholder": "Ange antal kontextvärden (standard: 2048)"
},
+ "numPredict": {
+ "label": "Max antal tokens (num_predict)",
+ "placeholder": "Ange Max antal tokens värde (t.ex. 2048, 4096)"
+ },
"seed": {
"label": "Frö",
"placeholder": "Ange frövärde (t.ex. 1234)",
@@ -116,5 +120,6 @@
"older": "Äldre"
},
"pin": "Fäst",
- "unpin": "Ta bort fäst"
+ "unpin": "Ta bort fäst",
+ "generationInfo": "Generationsinformation"
}
diff --git a/src/assets/locale/sv/option.json b/src/assets/locale/sv/option.json
index d7017eec..98e10127 100644
--- a/src/assets/locale/sv/option.json
+++ b/src/assets/locale/sv/option.json
@@ -8,5 +8,6 @@
"somethingWentWrong": "Något gick fel",
"validationSelectModel": "Vänligen välj en modell för att fortsätta",
"deleteHistoryConfirmation": "Är du säker på att du vill radera denna historik?",
- "editHistoryTitle": "Ange en ny titel"
+ "editHistoryTitle": "Ange en ny titel",
+ "temporaryChat": "Tillfällig chatt"
}
diff --git a/src/assets/locale/zh/common.json b/src/assets/locale/zh/common.json
index cd58e059..080776a9 100644
--- a/src/assets/locale/zh/common.json
+++ b/src/assets/locale/zh/common.json
@@ -70,6 +70,10 @@
"label": "上下文数量",
"placeholder": "输入上下文数量(默认:2048)"
},
+ "numPredict": {
+ "label": "最大令牌数 (num_predict)",
+ "placeholder": "输入最大令牌数(例如:2048、4096)"
+ },
"seed": {
"label": "随机种子",
"placeholder": "输入随机种子值(例如:1234)",
@@ -111,5 +115,6 @@
"older": "更早"
},
"pin": "置顶",
- "unpin": "取消置顶"
+ "unpin": "取消置顶",
+ "generationInfo": "生成信息"
}
\ No newline at end of file
diff --git a/src/assets/locale/zh/openai.json b/src/assets/locale/zh/openai.json
index 1b021847..00583f33 100644
--- a/src/assets/locale/zh/openai.json
+++ b/src/assets/locale/zh/openai.json
@@ -42,7 +42,7 @@
"delete": "删除",
"edit": "编辑",
"newModel": "向提供商添加模型",
- "noNewModel": "对于 LMStudio,我们动态获取。无需手动添加。",
+ "noNewModel": "对于 LMStudio, Ollama, Llamafile,我们动态获取。无需手动添加。",
"searchModel": "搜索模型",
"selectAll": "全选",
"save": "保存",
diff --git a/src/assets/locale/zh/option.json b/src/assets/locale/zh/option.json
index 095daca1..cba6731c 100644
--- a/src/assets/locale/zh/option.json
+++ b/src/assets/locale/zh/option.json
@@ -8,5 +8,6 @@
"somethingWentWrong": "出现了错误",
"validationSelectModel": "请选择一个模型以继续",
"deleteHistoryConfirmation": "你确定要删除这个历史记录吗?",
- "editHistoryTitle": "输入一个新的标题"
+ "editHistoryTitle": "输入一个新的标题",
+ "temporaryChat": "临时聊天"
}
\ No newline at end of file
diff --git a/src/chain/chat-with-website.ts b/src/chain/chat-with-website.ts
index 3b909d4e..1c6b5d39 100644
--- a/src/chain/chat-with-website.ts
+++ b/src/chain/chat-with-website.ts
@@ -1,3 +1,4 @@
+//@ts-nocheck
import { BaseLanguageModel } from "langchain/base_language";
import { Document } from "@langchain/core/documents";
import {
@@ -28,8 +29,8 @@ export function groupMessagesByConversation(messages: ChatHistory) {
const groupedMessages = [];
for (let i = 0; i < messages.length; i += 2) {
groupedMessages.push({
- human: messages[i].content,
- ai: messages[i + 1].content,
+ human: messages[i]?.content,
+ ai: messages[i + 1]?.content,
});
}
@@ -38,7 +39,7 @@ export function groupMessagesByConversation(messages: ChatHistory) {
const formatChatHistoryAsString = (history: BaseMessage[]) => {
return history
- .map((message) => `${message._getType()}: ${message.content}`)
+ .map((message) => `${message._getType()}: ${message?.content}`)
.join("\n");
};
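A note on the optional chaining added above: `groupMessagesByConversation` pairs messages two at a time, so a history with an odd number of entries (a human turn whose reply was never saved) used to throw on `messages[i + 1].content`. A minimal sketch of the new behavior, assuming a simple `{ content }` message shape:

```ts
type ChatHistory = { content: string }[]

// Pairs entries as [human, ai]; with an odd count the final pair gets
// ai === undefined instead of throwing.
function groupMessagesByConversation(messages: ChatHistory) {
  const groupedMessages: { human?: string; ai?: string }[] = []
  for (let i = 0; i < messages.length; i += 2) {
    groupedMessages.push({
      human: messages[i]?.content,
      ai: messages[i + 1]?.content
    })
  }
  return groupedMessages
}

groupMessagesByConversation([
  { content: "hi" },
  { content: "hello" },
  { content: "dangling turn" } // no AI reply yet
])
// → [{ human: "hi", ai: "hello" }, { human: "dangling turn", ai: undefined }]
```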
diff --git a/src/chain/chat-with-x.ts b/src/chain/chat-with-x.ts
index 4a638290..296277f5 100644
--- a/src/chain/chat-with-x.ts
+++ b/src/chain/chat-with-x.ts
@@ -1,3 +1,4 @@
+//@ts-nocheck
import { BaseLanguageModel } from "@langchain/core/language_models/base"
import { Document } from "@langchain/core/documents"
import {
diff --git a/src/components/Common/Playground/GenerationInfo.tsx b/src/components/Common/Playground/GenerationInfo.tsx
new file mode 100644
index 00000000..4e34710b
--- /dev/null
+++ b/src/components/Common/Playground/GenerationInfo.tsx
@@ -0,0 +1,65 @@
+type GenerationMetrics = {
+ total_duration?: number
+ load_duration?: number
+ prompt_eval_count?: number
+ prompt_eval_duration?: number
+ eval_count?: number
+ eval_duration?: number
+ context?: string
+ response?: string
+}
+
+type Props = {
+ generationInfo: GenerationMetrics
+}
+
+export const GenerationInfo = ({ generationInfo }: Props) => {
+ if (!generationInfo) return null
+
+ const calculateTokensPerSecond = (
+ evalCount?: number,
+ evalDuration?: number
+ ) => {
+ if (!evalCount || !evalDuration) return 0
+ return (evalCount / evalDuration) * 1e9
+ }
+
+ const formatDuration = (nanoseconds?: number) => {
+ if (!nanoseconds) return "0ms"
+ const ms = nanoseconds / 1e6
+ if (ms < 1) return `${ms.toFixed(3)}ms`
+ if (ms < 1000) return `${Math.round(ms)}ms`
+ return `${(ms / 1000).toFixed(2)}s`
+ }
+
+ const metricsToDisplay = {
+ ...generationInfo,
+ ...(generationInfo?.eval_count && generationInfo?.eval_duration
+ ? {
+ tokens_per_second: calculateTokensPerSecond(
+ generationInfo.eval_count,
+ generationInfo.eval_duration
+ ).toFixed(2)
+ }
+ : {})
+ }
+
+  return (
+    <div className="p-2">
+      <div className="flex flex-col gap-2">
+        {Object.entries(metricsToDisplay)
+          .filter(([key]) => key !== "model")
+          .map(([key, value]) => (
+            <div key={key} className="flex justify-between gap-4 text-sm">
+              <span className="font-medium">{key}</span>
+              <span>
+                {key.includes("duration")
+                  ? formatDuration(value as number)
+                  : String(value)}
+              </span>
+            </div>
+          ))}
+      </div>
+    </div>
+  )
+}
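Ollama reports all of these metrics in nanoseconds, which is why `calculateTokensPerSecond` scales by 1e9 and `formatDuration` divides by 1e6 before formatting. A worked example with made-up numbers:

```ts
// 250 tokens generated over 5 seconds (5e9 ns):
const eval_count = 250
const eval_duration = 5_000_000_000

const tokensPerSecond = (eval_count / eval_duration) * 1e9
console.log(tokensPerSecond.toFixed(2)) // "50.00"

// formatDuration applies the same unit convention:
//   750_000 ns       → 0.75 ms  → "0.750ms"
//   42_000_000 ns    → 42 ms    → "42ms"
//   5_000_000_000 ns → 5000 ms  → "5.00s"
```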
diff --git a/src/components/Common/Playground/Message.tsx b/src/components/Common/Playground/Message.tsx
index 0c6299c6..925fe4c5 100644
--- a/src/components/Common/Playground/Message.tsx
+++ b/src/components/Common/Playground/Message.tsx
@@ -1,10 +1,11 @@
import Markdown from "../../Common/Markdown"
import React from "react"
-import { Tag, Image, Tooltip, Collapse } from "antd"
+import { Tag, Image, Tooltip, Collapse, Popover } from "antd"
import { WebSearch } from "./WebSearch"
import {
CheckIcon,
ClipboardIcon,
+ InfoIcon,
Pen,
PlayIcon,
RotateCcw,
@@ -16,6 +17,7 @@ import { MessageSource } from "./MessageSource"
import { useTTS } from "@/hooks/useTTS"
import { tagColors } from "@/utils/color"
import { removeModelSuffix } from "@/db/models"
+import { GenerationInfo } from "./GenerationInfo"
type Props = {
message: string
@@ -37,6 +39,7 @@ type Props = {
hideEditAndRegenerate?: boolean
onSourceClick?: (source: any) => void
isTTSEnabled?: boolean
+ generationInfo?: any
}
export const PlaygroundMessage = (props: Props) => {
@@ -206,6 +209,18 @@ export const PlaygroundMessage = (props: Props) => {
)}
+ {props.generationInfo && (
+
+ }
+ title={t("generationInfo")}>
+
+
+ )}
+
{!props.hideEditAndRegenerate &&
props.currentMessageIndex === props.totalMessages - 1 && (
diff --git a/src/components/Common/ProviderIcon.tsx b/src/components/Common/ProviderIcon.tsx
index 38ba5043..8142adc6 100644
--- a/src/components/Common/ProviderIcon.tsx
+++ b/src/components/Common/ProviderIcon.tsx
@@ -6,6 +6,7 @@ import { LMStudioIcon } from "../Icons/LMStudio"
import { OpenAiIcon } from "../Icons/OpenAI"
import { TogtherMonoIcon } from "../Icons/Togther"
import { OpenRouterIcon } from "../Icons/OpenRouter"
+import { LLamaFile } from "../Icons/Llamafile"
export const ProviderIcons = ({
provider,
@@ -31,6 +32,8 @@ export const ProviderIcons = ({
return
case "openrouter":
return
+ case "llamafile":
+ return
default:
return
}
diff --git a/src/components/Common/Settings/CurrentChatModelSettings.tsx b/src/components/Common/Settings/CurrentChatModelSettings.tsx
index 3e5c74f9..e950bd9e 100644
--- a/src/components/Common/Settings/CurrentChatModelSettings.tsx
+++ b/src/components/Common/Settings/CurrentChatModelSettings.tsx
@@ -1,3 +1,5 @@
+import { getPromptById } from "@/db"
+import { useMessageOption } from "@/hooks/useMessageOption"
import { getAllModelSettings } from "@/services/model-settings"
import { useStoreChatModelSettings } from "@/store/model"
import { useQuery } from "@tanstack/react-query"
@@ -27,10 +29,20 @@ export const CurrentChatModelSettings = ({
const { t } = useTranslation("common")
const [form] = Form.useForm()
const cUserSettings = useStoreChatModelSettings()
+ const { selectedSystemPrompt } = useMessageOption()
const { isPending: isLoading } = useQuery({
queryKey: ["fetchModelConfig2", open],
queryFn: async () => {
const data = await getAllModelSettings()
+
+ let tempSystemPrompt = "";
+
+      // Not elegant, but resolve the selected prompt here so the form can show its content as the temporary system prompt.
+ if (selectedSystemPrompt) {
+ const prompt = await getPromptById(selectedSystemPrompt)
+ tempSystemPrompt = prompt?.content ?? ""
+ }
+
form.setFieldsValue({
temperature: cUserSettings.temperature ?? data.temperature,
topK: cUserSettings.topK ?? data.topK,
@@ -39,14 +51,17 @@ export const CurrentChatModelSettings = ({
numCtx: cUserSettings.numCtx ?? data.numCtx,
seed: cUserSettings.seed,
numGpu: cUserSettings.numGpu ?? data.numGpu,
- systemPrompt: cUserSettings.systemPrompt ?? ""
+ numPredict: cUserSettings.numPredict ?? data.numPredict,
+ systemPrompt: cUserSettings.systemPrompt ?? tempSystemPrompt
})
return data
},
enabled: open,
- refetchOnMount: true
+ refetchOnMount: false,
+ refetchOnWindowFocus: false
})
+
const renderBody = () => {
return (
<>
@@ -115,6 +130,15 @@ export const CurrentChatModelSettings = ({
/>
+          <Form.Item
+            name="numPredict"
+            label={t("modelSettings.form.numPredict.label")}>
+            <InputNumber
+              style={{ width: "100%" }}
+              placeholder={t("modelSettings.form.numPredict.placeholder")}
+            />
+          </Form.Item>
+
diff --git a/src/components/Icons/Llamafile.tsx b/src/components/Icons/Llamafile.tsx
new file mode 100644
--- /dev/null
+++ b/src/components/Icons/Llamafile.tsx
@@ -0,0 +1,10 @@
+import React from "react"
+
+export const LLamaFile = React.forwardRef<
+  SVGSVGElement,
+  React.SVGProps<SVGSVGElement>
+>((props, ref) => {
+  return (
+    <svg ref={ref} viewBox="0 0 24 24" fill="currentColor" {...props} />
+  )
+})
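The form seeding above follows a per-chat-overrides-global pattern: each field prefers the value stored for the current chat (`cUserSettings`) and falls back to the persisted defaults from `getAllModelSettings()`, with `numPredict` now joining the list. A minimal sketch of that resolution rule (field names mirror the form; the helper itself is hypothetical):

```ts
type ModelSettings = {
  temperature?: number
  numCtx?: number
  numPredict?: number
}

// `??` rather than `||` is deliberate: an explicit 0 set on the chat still
// overrides the global default, while undefined/null fall through.
const resolveSettings = (
  perChat: ModelSettings,
  globals: ModelSettings
): ModelSettings => ({
  temperature: perChat.temperature ?? globals.temperature,
  numCtx: perChat.numCtx ?? globals.numCtx,
  numPredict: perChat.numPredict ?? globals.numPredict
})

resolveSettings(
  { numPredict: 4096 },
  { temperature: 0.7, numCtx: 2048, numPredict: 2048 }
)
// → { temperature: 0.7, numCtx: 2048, numPredict: 4096 }
```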
diff --git a/src/components/Layouts/Header.tsx b/src/components/Layouts/Header.tsx
index 3d3ba89b..7f3e1531 100644
--- a/src/components/Layouts/Header.tsx
+++ b/src/components/Layouts/Header.tsx
@@ -21,6 +21,7 @@ import { Select, Tooltip } from "antd"
import { getAllPrompts } from "@/db"
import { ShareBtn } from "~/components/Common/ShareBtn"
import { ProviderIcons } from "../Common/ProviderIcon"
+import { NewChat } from "./NewChat"
type Props = {
setSidebarOpen: (open: boolean) => void
setOpenModelSettings: (open: boolean) => void
export const Header: React.FC<Props> = ({
setSelectedSystemPrompt,
messages,
streaming,
- historyId
+ historyId,
+ temporaryChat
} = useMessageOption()
const {
data: models,
isLoading: isModelsLoading,
- isFetching: isModelsFetching
} = useQuery({
queryKey: ["fetchModel"],
queryFn: () => fetchChatModels({ returnEmpty: true }),
@@ -86,7 +87,9 @@ export const Header: React.FC = ({
}
return (
-
+
{pathname !== "/" && (
@@ -104,14 +107,9 @@ export const Header: React.FC<Props> = ({
-
-
-
+          <NewChat clearChat={clearChat} />
{"/"}
diff --git a/src/components/Layouts/NewChat.tsx b/src/components/Layouts/NewChat.tsx
new file mode 100644
index 00000000..e20681ae
--- /dev/null
+++ b/src/components/Layouts/NewChat.tsx
@@ -0,0 +1,57 @@
+import { SquarePen, MoreHorizontal, TimerReset } from "lucide-react"
+import { useTranslation } from "react-i18next"
+import { Dropdown, Switch } from "antd"
+import type { MenuProps } from "antd"
+import { useMessageOption } from "@/hooks/useMessageOption"
+
+type Props = {
+ clearChat: () => void
+}
+
+export const NewChat: React.FC<Props> = ({ clearChat }) => {
+ const { t } = useTranslation(["option", "common"])
+
+ const { temporaryChat, setTemporaryChat, messages } = useMessageOption()
+
+  const items: MenuProps["items"] = [
+    {
+      key: "1",
+      label: (
+        <div className="flex items-center justify-between gap-4">
+          <span className="flex items-center gap-2">
+            <TimerReset className="h-4 w-4" />
+            {t("temporaryChat")}
+          </span>
+          <Switch
+            size="small"
+            checked={temporaryChat}
+            onChange={(checked) => {
+              setTemporaryChat(checked)
+              if (messages.length > 0) {
+                clearChat()
+              }
+            }}
+          />
+        </div>
+      )
+    }
+  ]
+  return (
+    <div className="flex items-center gap-2">
+      <button
+        onClick={clearChat}
+        className="inline-flex items-center gap-2 rounded-md border px-2 py-1 text-sm">
+        <SquarePen className="h-4 w-4" />
+        {t("newChat")}
+      </button>
+      <Dropdown menu={{ items }} trigger={["click"]}>
+        <button className="inline-flex items-center">
+          <MoreHorizontal className="h-4 w-4" />
+        </button>
+      </Dropdown>
+    </div>
+  )
+}
diff --git a/src/components/Option/Playground/PlaygroundChat.tsx b/src/components/Option/Playground/PlaygroundChat.tsx
index 5f2adfe2..aaf34629 100644
--- a/src/components/Option/Playground/PlaygroundChat.tsx
+++ b/src/components/Option/Playground/PlaygroundChat.tsx
@@ -54,6 +54,7 @@ export const PlaygroundChat = () => {
setIsSourceOpen(true)
}}
isTTSEnabled={ttsEnabled}
+ generationInfo={message?.generationInfo}
/>
))}
{messages.length > 0 && (
diff --git a/src/components/Option/Playground/PlaygroundForm.tsx b/src/components/Option/Playground/PlaygroundForm.tsx
index be312e22..5f19861d 100644
--- a/src/components/Option/Playground/PlaygroundForm.tsx
+++ b/src/components/Option/Playground/PlaygroundForm.tsx
@@ -36,7 +36,8 @@ export const PlaygroundForm = ({ dropedFile }: Props) => {
selectedQuickPrompt,
textareaRef,
setSelectedQuickPrompt,
- selectedKnowledge
+ selectedKnowledge,
+ temporaryChat
} = useMessageOption()
const isMobile = () => {
@@ -159,7 +160,7 @@ export const PlaygroundForm = ({ dropedFile }: Props) => {
e.preventDefault()
stopListening()
form.onSubmit(async (value) => {
- if (value.message.trim().length === 0) {
+ if (value.message.trim().length === 0 && value.image.length === 0) {
return
}
if (!selectedModel || selectedModel.length === 0) {
@@ -190,7 +191,10 @@ export const PlaygroundForm = ({ dropedFile }: Props) => {
}
return (
-
+
-
+
-
-
{!selectedKnowledge && (
diff --git a/src/components/Option/Sidebar.tsx b/src/components/Option/Sidebar.tsx
index 1c9fd545..6e6881d9 100644
--- a/src/components/Option/Sidebar.tsx
+++ b/src/components/Option/Sidebar.tsx
@@ -34,7 +34,8 @@ export const Sidebar = ({ onClose }: Props) => {
setHistoryId,
historyId,
clearChat,
- setSelectedModel
+ setSelectedModel,
+ temporaryChat
} = useMessageOption()
const { t } = useTranslation(["option", "common"])
const client = useQueryClient()
@@ -126,7 +127,7 @@ export const Sidebar = ({ onClose }: Props) => {
})
return (
-
+
{status === "success" && chatHistories.length === 0 && (
@@ -244,4 +245,4 @@ export const Sidebar = ({ onClose }: Props) => {
)}
)
-}
+}
\ No newline at end of file
diff --git a/src/components/Sidepanel/Chat/body.tsx b/src/components/Sidepanel/Chat/body.tsx
index 61d90715..f9f14079 100644
--- a/src/components/Sidepanel/Chat/body.tsx
+++ b/src/components/Sidepanel/Chat/body.tsx
@@ -47,6 +47,7 @@ export const SidePanelBody = () => {
setIsSourceOpen(true)
}}
isTTSEnabled={ttsEnabled}
+ generationInfo={message?.generationInfo}
/>
))}
diff --git a/src/components/Sidepanel/Chat/form.tsx b/src/components/Sidepanel/Chat/form.tsx
index e1cc3f01..618431d4 100644
--- a/src/components/Sidepanel/Chat/form.tsx
+++ b/src/components/Sidepanel/Chat/form.tsx
@@ -75,10 +75,10 @@ export const SidepanelForm = ({ dropedFile }: Props) => {
) {
e.preventDefault()
form.onSubmit(async (value) => {
- await stopListening()
- if (value.message.trim().length === 0) {
+ if (value.message.trim().length === 0 && value.image.length === 0) {
return
}
+ await stopListening()
if (!selectedModel || selectedModel.length === 0) {
form.setFieldError("message", t("formError.noModel"))
return
@@ -237,6 +237,9 @@ export const SidepanelForm = ({ dropedFile }: Props) => {
}
}
await stopListening()
+ if (value.message.trim().length === 0 && value.image.length === 0) {
+ return
+ }
form.reset()
textAreaFocus()
await sendMessage({
@@ -260,7 +263,6 @@ export const SidepanelForm = ({ dropedFile }: Props) => {
onKeyDown={(e) => handleKeyDown(e)}
ref={textareaRef}
className="px-2 py-2 w-full resize-none bg-transparent focus-within:outline-none focus:ring-0 focus-visible:ring-0 ring-0 dark:ring-0 border-0 dark:text-gray-100"
- required
onPaste={handlePaste}
rows={1}
style={{ minHeight: "60px" }}
diff --git a/src/db/index.ts b/src/db/index.ts
index 4a775532..da837b29 100644
--- a/src/db/index.ts
+++ b/src/db/index.ts
@@ -33,6 +33,7 @@ type Message = {
search?: WebSearch
createdAt: number
messageType?: string
+ generationInfo?: any
}
type Webshare = {
@@ -254,7 +255,8 @@ export const saveMessage = async (
images: string[],
source?: any[],
time?: number,
- message_type?: string
+ message_type?: string,
+ generationInfo?: any
) => {
const id = generateID()
let createdAt = Date.now()
@@ -270,7 +272,8 @@ export const saveMessage = async (
images,
createdAt,
sources: source,
- messageType: message_type
+ messageType: message_type,
+ generationInfo: generationInfo
}
const db = new PageAssitDatabase()
await db.addMessage(message)
@@ -298,7 +301,8 @@ export const formatToMessage = (messages: MessageHistory): MessageType[] => {
message: message.content,
name: message.name,
sources: message?.sources || [],
- images: message.images || []
+ images: message.images || [],
+ generationInfo: message?.generationInfo,
}
})
}
@@ -351,10 +355,14 @@ export const updateMessageByIndex = async (
index: number,
message: string
) => {
+ try {
const db = new PageAssitDatabase()
const chatHistory = (await db.getChatHistory(history_id)).reverse()
chatHistory[index].content = message
await db.db.set({ [history_id]: chatHistory.reverse() })
+  } catch (e) {
+    // Ignore: temporary chats have no persisted history, so this lookup can fail.
+  }
}
export const deleteChatForEdit = async (history_id: string, index: number) => {
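Since `saveMessage` gains `generationInfo` as a trailing optional parameter, call sites reach it positionally. A hedged sketch of a caller (the four leading arguments are inferred from the `Message` shape above; values are placeholders):

```ts
import { saveMessage } from "@/db"

declare const historyId: string // current chat id
declare const fullText: string  // generated answer

await saveMessage(
  historyId,
  "llama3.2",   // model name shown on the message
  "assistant",  // role
  fullText,
  [],           // images
  [],           // sources
  2,            // time
  undefined,    // message_type
  {
    // generationInfo as returned by Ollama (durations in nanoseconds)
    eval_count: 128,
    eval_duration: 2_500_000_000
  }
)
```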
diff --git a/src/db/models.ts b/src/db/models.ts
index 4575c677..928beb99 100644
--- a/src/db/models.ts
+++ b/src/db/models.ts
@@ -24,6 +24,8 @@ export const removeModelSuffix = (id: string) => {
return id
.replace(/_model-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{3,4}-[a-f0-9]{4}/, "")
.replace(/_lmstudio_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/, "")
+ .replace(/_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/, "")
+ .replace(/_ollama2_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/, "")
}
export const isLMStudioModel = (model: string) => {
const lmstudioModelRegex =
@@ -31,6 +33,16 @@ export const isLMStudioModel = (model: string) => {
return lmstudioModelRegex.test(model)
}
+export const isLlamafileModel = (model: string) => {
+ const llamafileModelRegex =
+ /_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/
+ return llamafileModelRegex.test(model)
+}
+export const isOllamaModel = (model: string) => {
+ const ollamaModelRegex =
+ /_ollama2_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/
+ return ollamaModelRegex.test(model)
+}
export const getLMStudioModelId = (
model: string
): { model_id: string; provider_id: string } => {
@@ -44,10 +56,45 @@ export const getLMStudioModelId = (
}
return null
}
+export const getOllamaModelId = (
+ model: string
+): { model_id: string; provider_id: string } => {
+ const ollamaModelRegex =
+ /_ollama2_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/
+ const match = model.match(ollamaModelRegex)
+ if (match) {
+ const modelId = match[0]
+ const providerId = match[0].replace("_ollama2_openai-", "")
+ return { model_id: modelId, provider_id: providerId }
+ }
+ return null
+}
+export const getLlamafileModelId = (
+ model: string
+): { model_id: string; provider_id: string } => {
+ const llamafileModelRegex =
+ /_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/
+ const match = model.match(llamafileModelRegex)
+ if (match) {
+ const modelId = match[0]
+ const providerId = match[0].replace("_llamafile_openai-", "")
+ return { model_id: modelId, provider_id: providerId }
+ }
+ return null
+}
export const isCustomModel = (model: string) => {
if (isLMStudioModel(model)) {
return true
}
+
+ if (isLlamafileModel(model)) {
+ return true
+ }
+
+ if (isOllamaModel(model)) {
+ return true
+ }
+
const customModelRegex =
/_model-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{3,4}-[a-f0-9]{4}/
return customModelRegex.test(model)
@@ -201,6 +248,44 @@ export const getModelInfo = async (id: string) => {
}
}
+
+ if (isLlamafileModel(id)) {
+ const llamafileId = getLlamafileModelId(id)
+ if (!llamafileId) {
+ throw new Error("Invalid LMStudio model ID")
+ }
+ return {
+ model_id: id.replace(
+ /_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/,
+ ""
+ ),
+ provider_id: `openai-${llamafileId.provider_id}`,
+ name: id.replace(
+ /_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/,
+ ""
+ )
+ }
+ }
+
+
+ if (isOllamaModel(id)) {
+ const ollamaId = getOllamaModelId(id)
+ if (!ollamaId) {
+ throw new Error("Invalid LMStudio model ID")
+ }
+ return {
+ model_id: id.replace(
+ /_ollama2_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/,
+ ""
+ ),
+ provider_id: `openai-${ollamaId.provider_id}`,
+ name: id.replace(
+ /_ollama2_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/,
+ ""
+ )
+ }
+ }
+
const model = await db.getById(id)
return model
}
@@ -264,6 +349,48 @@ export const dynamicFetchLMStudio = async ({
return lmstudioModels
}
+export const dynamicFetchOllama2 = async ({
+ baseUrl,
+ providerId
+}: {
+ baseUrl: string
+ providerId: string
+}) => {
+ const models = await getAllOpenAIModels(baseUrl)
+ const ollama2Models = models.map((e) => {
+ return {
+ name: e?.name || e?.id,
+ id: `${e?.id}_ollama2_${providerId}`,
+ provider: providerId,
+ lookup: `${e?.id}_${providerId}`,
+ provider_id: providerId
+ }
+ })
+
+ return ollama2Models
+}
+
+export const dynamicFetchLlamafile = async ({
+ baseUrl,
+ providerId
+}: {
+ baseUrl: string
+ providerId: string
+}) => {
+ const models = await getAllOpenAIModels(baseUrl)
+ const llamafileModels = models.map((e) => {
+ return {
+ name: e?.name || e?.id,
+ id: `${e?.id}_llamafile_${providerId}`,
+ provider: providerId,
+ lookup: `${e?.id}_${providerId}`,
+ provider_id: providerId
+ }
+ })
+
+ return llamafileModels
+}
+
export const ollamaFormatAllCustomModels = async (
modelType: "all" | "chat" | "embedding" = "all"
) => {
@@ -276,6 +403,14 @@ export const ollamaFormatAllCustomModels = async (
(provider) => provider.provider === "lmstudio"
)
+ const llamafileProviders = allProviders.filter(
+ (provider) => provider.provider === "llamafile"
+ )
+
+ const ollamaProviders = allProviders.filter(
+ (provider) => provider.provider === "ollama2"
+ )
+
const lmModelsPromises = lmstudioProviders.map((provider) =>
dynamicFetchLMStudio({
baseUrl: provider.baseUrl,
@@ -283,16 +418,39 @@ export const ollamaFormatAllCustomModels = async (
})
)
+ const llamafileModelsPromises = llamafileProviders.map((provider) =>
+ dynamicFetchLlamafile({
+ baseUrl: provider.baseUrl,
+ providerId: provider.id
+ })
+ )
+
+ const ollamaModelsPromises = ollamaProviders.map((provider) =>
+ dynamicFetchOllama2({
+ baseUrl: provider.baseUrl,
+ providerId: provider.id
+ }))
+
const lmModelsFetch = await Promise.all(lmModelsPromises)
+ const llamafileModelsFetch = await Promise.all(llamafileModelsPromises)
+
+ const ollamaModelsFetch = await Promise.all(ollamaModelsPromises)
+
const lmModels = lmModelsFetch.flat()
+ const llamafileModels = llamafileModelsFetch.flat()
+
+ const ollama2Models = ollamaModelsFetch.flat()
+
// merge allModels and lmModels
const allModlesWithLMStudio = [
...(modelType !== "all"
? allModles.filter((model) => model.model_type === modelType)
: allModles),
- ...lmModels
+ ...lmModels,
+ ...llamafileModels,
+ ...ollama2Models
]
const ollamaModels = allModlesWithLMStudio.map((model) => {
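// Note: the provider-suffix scheme above round-trips ids of the form
// `<model>_<provider>_openai-xxxx-xxx-xxxx`. A minimal sketch of the pairing
// (hypothetical helper names; the patch inlines one regex per provider):
const PROVIDER_SUFFIX =
  /_(lmstudio|llamafile|ollama2)_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/

// "llama3_ollama2_openai-ab12-c34-de56" -> "llama3"
const stripProviderSuffix = (id: string): string => id.replace(PROVIDER_SUFFIX, "")

// mirrors how dynamicFetchOllama2 / dynamicFetchLlamafile build ids,
// where providerId is already of the form "openai-xxxx-xxx-xxxx"
const buildModelId = (modelId: string, provider: string, providerId: string): string =>
  `${modelId}_${provider}_${providerId}`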
diff --git a/src/hooks/chat-helper/index.ts b/src/hooks/chat-helper/index.ts
index 18f89bea..3f725c02 100644
--- a/src/hooks/chat-helper/index.ts
+++ b/src/hooks/chat-helper/index.ts
@@ -118,7 +118,8 @@ export const saveMessageOnSuccess = async ({
fullText,
source,
message_source = "web-ui",
- message_type
+ message_type,
+ generationInfo
}: {
historyId: string | null
setHistoryId: (historyId: string) => void
@@ -130,6 +130,7 @@ export const saveMessageOnSuccess = async ({
source: any[]
message_source?: "copilot" | "web-ui",
message_type?: string
+ generationInfo?: any
}) => {
if (historyId) {
if (!isRegenerate) {
@@ -141,7 +142,8 @@ export const saveMessageOnSuccess = async ({
[image],
[],
1,
- message_type
+ message_type,
+ generationInfo
)
}
await saveMessage(
@@ -152,7 +154,8 @@ export const saveMessageOnSuccess = async ({
[],
source,
2,
- message_type
+ message_type,
+ generationInfo
)
await setLastUsedChatModel(historyId, selectedModel!)
} else {
@@ -166,7 +169,8 @@ export const saveMessageOnSuccess = async ({
[image],
[],
1,
- message_type
+ message_type,
+ generationInfo
)
await saveMessage(
newHistoryId.id,
@@ -176,7 +180,8 @@ export const saveMessageOnSuccess = async ({
[],
source,
2,
- message_type
+ message_type,
+ generationInfo
)
setHistoryId(newHistoryId.id)
await setLastUsedChatModel(newHistoryId.id, selectedModel!)
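// Note: `generationInfo` is passed through opaquely (typed `any`). With an
// Ollama backend it typically carries the final-response counters; the field
// names below come from Ollama's API and are illustrative, not guaranteed:
type OllamaGenerationInfo = {
  total_duration?: number // nanoseconds, end to end
  load_duration?: number // nanoseconds spent loading the model
  prompt_eval_count?: number // prompt tokens evaluated
  eval_count?: number // tokens generated
  eval_duration?: number // nanoseconds spent generating
}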
diff --git a/src/hooks/useMessage.tsx b/src/hooks/useMessage.tsx
index c85324a9..d06102db 100644
--- a/src/hooks/useMessage.tsx
+++ b/src/hooks/useMessage.tsx
@@ -133,7 +133,9 @@ export const useMessage = () => {
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
- currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
+ numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict,
+
})
let newMessage: Message[] = []
@@ -261,7 +263,9 @@ export const useMessage = () => {
userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
- currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
+ numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict,
+
})
const response = await questionOllama.invoke(promptForQuestion)
query = response.content.toString()
@@ -328,16 +332,31 @@ export const useMessage = () => {
const applicationChatHistory = generateHistory(history, selectedModel)
+ let generationInfo: any | undefined = undefined
+
const chunks = await ollama.stream(
[...applicationChatHistory, humanMessage],
{
- signal: signal
+ signal: signal,
+ callbacks: [
+ {
+ handleLLMEnd(
+ output: any,
+ ): any {
+ try {
+ generationInfo = output?.generations?.[0]?.[0]?.generationInfo
+ } catch (e) {
+ console.log("handleLLMEnd error", e)
+ }
+ }
+ }
+ ]
}
)
let count = 0
for await (const chunk of chunks) {
- contentToSave += chunk.content
- fullText += chunk.content
+ contentToSave += chunk?.content
+ fullText += chunk?.content
if (count === 0) {
setIsProcessing(true)
}
@@ -361,7 +380,8 @@ export const useMessage = () => {
return {
...message,
message: fullText,
- sources: source
+ sources: source,
+ generationInfo
}
}
return message
@@ -390,7 +410,8 @@ export const useMessage = () => {
image,
fullText,
source,
- message_source: "copilot"
+ message_source: "copilot",
+ generationInfo
})
setIsProcessing(false)
@@ -458,7 +479,9 @@ export const useMessage = () => {
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
- currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
+ numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict,
+
})
let newMessage: Message[] = []
@@ -544,16 +567,31 @@ export const useMessage = () => {
)
}
+ let generationInfo: any | undefined = undefined
+
const chunks = await ollama.stream(
[...applicationChatHistory, humanMessage],
{
- signal: signal
+ signal: signal,
+ callbacks: [
+ {
+ handleLLMEnd(
+ output: any,
+ ): any {
+ try {
+ generationInfo = output?.generations?.[0]?.[0]?.generationInfo
+ } catch (e) {
+ console.log("handleLLMEnd error", e)
+ }
+ }
+ }
+ ]
}
)
let count = 0
for await (const chunk of chunks) {
- contentToSave += chunk.content
- fullText += chunk.content
+ contentToSave += chunk?.content
+ fullText += chunk?.content
if (count === 0) {
setIsProcessing(true)
}
@@ -576,7 +614,8 @@ export const useMessage = () => {
if (message.id === generateMessageId) {
return {
...message,
- message: fullText
+ message: fullText,
+ generationInfo
}
}
return message
@@ -605,7 +644,8 @@ export const useMessage = () => {
image,
fullText,
source: [],
- message_source: "copilot"
+ message_source: "copilot",
+ generationInfo
})
setIsProcessing(false)
@@ -668,7 +708,9 @@ export const useMessage = () => {
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
- currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
+ numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict,
+
})
let newMessage: Message[] = []
@@ -743,7 +785,9 @@ export const useMessage = () => {
userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
- currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
+ numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict,
+
})
const response = await questionOllama.invoke(promptForQuestion)
query = response.content.toString()
@@ -789,16 +833,30 @@ export const useMessage = () => {
)
}
+ let generationInfo: any | undefined = undefined
const chunks = await ollama.stream(
[...applicationChatHistory, humanMessage],
{
- signal: signal
+ signal: signal,
+ callbacks: [
+ {
+ handleLLMEnd(
+ output: any,
+ ): any {
+ try {
+ generationInfo = output?.generations?.[0]?.[0]?.generationInfo
+ } catch (e) {
+ console.log("handleLLMEnd error", e)
+ }
+ }
+ }
+ ]
}
)
let count = 0
for await (const chunk of chunks) {
- contentToSave += chunk.content
- fullText += chunk.content
+ contentToSave += chunk?.content
+ fullText += chunk?.content
if (count === 0) {
setIsProcessing(true)
}
@@ -822,7 +880,8 @@ export const useMessage = () => {
return {
...message,
message: fullText,
- sources: source
+ sources: source,
+ generationInfo
}
}
return message
@@ -850,7 +909,8 @@ export const useMessage = () => {
message,
image,
fullText,
- source
+ source,
+ generationInfo
})
setIsProcessing(false)
@@ -914,7 +974,9 @@ export const useMessage = () => {
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
- currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
+ numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict,
+
})
let newMessage: Message[] = []
@@ -982,13 +1044,28 @@ export const useMessage = () => {
})
}
+ let generationInfo: any | undefined = undefined
+
const chunks = await ollama.stream([humanMessage], {
- signal: signal
+ signal: signal,
+ callbacks: [
+ {
+ handleLLMEnd(
+ output: any,
+ ): any {
+ try {
+ generationInfo = output?.generations?.[0]?.[0]?.generationInfo
+ } catch (e) {
+ console.log("handleLLMEnd error", e)
+ }
+ }
+ }
+ ]
})
let count = 0
for await (const chunk of chunks) {
- contentToSave += chunk.content
- fullText += chunk.content
+ contentToSave += chunk?.content
+ fullText += chunk?.content
if (count === 0) {
setIsProcessing(true)
}
@@ -1011,7 +1088,8 @@ export const useMessage = () => {
if (message.id === generateMessageId) {
return {
...message,
- message: fullText
+ message: fullText,
+ generationInfo
}
}
return message
@@ -1042,7 +1120,8 @@ export const useMessage = () => {
fullText,
source: [],
message_source: "copilot",
- message_type: messageType
+ message_type: messageType,
+ generationInfo
})
setIsProcessing(false)
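// Note: the same handleLLMEnd callback literal is repeated in every
// ollama.stream(...) call in this file. A shared factory (hypothetical,
// not part of the patch) would keep the copies in sync:
const captureGenerationInfo = (sink: { value?: any }) => ({
  handleLLMEnd(output: any): any {
    try {
      // LangChain reports the final generation metadata here
      sink.value = output?.generations?.[0]?.[0]?.generationInfo
    } catch (e) {
      console.log("handleLLMEnd error", e)
    }
  }
})
// usage: const info: { value?: any } = {}
// await ollama.stream(msgs, { signal, callbacks: [captureGenerationInfo(info)] })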
diff --git a/src/hooks/useMessageOption.tsx b/src/hooks/useMessageOption.tsx
index 58d56bfc..322287c4 100644
--- a/src/hooks/useMessageOption.tsx
+++ b/src/hooks/useMessageOption.tsx
@@ -22,7 +22,10 @@ import { notification } from "antd"
import { getSystemPromptForWeb } from "~/web/web"
import { generateHistory } from "@/utils/generate-history"
import { useTranslation } from "react-i18next"
-import { saveMessageOnError, saveMessageOnSuccess } from "./chat-helper"
+import {
+ saveMessageOnError as saveError,
+ saveMessageOnSuccess as saveSuccess
+} from "./chat-helper"
import { usePageAssist } from "@/context"
import { PageAssistVectorStore } from "@/libs/PageAssistVectorStore"
import { formatDocs } from "@/chain/chat-with-x"
@@ -65,7 +68,9 @@ export const useMessageOption = () => {
selectedSystemPrompt,
setSelectedSystemPrompt,
selectedKnowledge,
- setSelectedKnowledge
+ setSelectedKnowledge,
+ temporaryChat,
+ setTemporaryChat
} = useStoreMessageOption()
const currentChatModelSettings = useStoreChatModelSettings()
const [selectedModel, setSelectedModel] = useStorage("selectedModel")
@@ -122,7 +127,10 @@ export const useMessageOption = () => {
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
- currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
+ numPredict:
+ currentChatModelSettings?.numPredict ??
+ userDefaultModelSettings?.numPredict
})
let newMessage: Message[] = []
@@ -197,7 +205,11 @@ export const useMessageOption = () => {
userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
- currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
+ currentChatModelSettings?.numGpu ??
+ userDefaultModelSettings?.numGpu,
+ numPredict:
+ currentChatModelSettings?.numPredict ??
+ userDefaultModelSettings?.numPredict
})
const response = await questionOllama.invoke(promptForQuestion)
query = response.content.toString()
@@ -243,16 +255,29 @@ export const useMessageOption = () => {
)
}
+ let generationInfo: any | undefined = undefined
+
const chunks = await ollama.stream(
[...applicationChatHistory, humanMessage],
{
- signal: signal
+ signal: signal,
+ callbacks: [
+ {
+ handleLLMEnd(output: any): any {
+ try {
+ generationInfo = output?.generations?.[0]?.[0]?.generationInfo
+ } catch (e) {
+ console.log("handleLLMEnd error", e)
+ }
+ }
+ }
+ ]
}
)
let count = 0
for await (const chunk of chunks) {
- contentToSave += chunk.content
- fullText += chunk.content
+ contentToSave += chunk?.content
+ fullText += chunk?.content
if (count === 0) {
setIsProcessing(true)
}
@@ -276,7 +301,8 @@ export const useMessageOption = () => {
return {
...message,
message: fullText,
- sources: source
+ sources: source,
+ generationInfo
}
}
return message
@@ -304,7 +330,8 @@ export const useMessageOption = () => {
message,
image,
fullText,
- source
+ source,
+ generationInfo
})
setIsProcessing(false)
@@ -336,6 +363,39 @@ export const useMessageOption = () => {
}
}
+ const saveMessageOnSuccess = async (e: any) => {
+ if (!temporaryChat) {
+ return await saveSuccess(e)
+ } else {
+ setHistoryId("temp")
+ }
+
+ return true
+ }
+
+ const saveMessageOnError = async (e: any) => {
+ if (!temporaryChat) {
+ return await saveError(e)
+ } else {
+ setHistory([
+ ...history,
+ {
+ role: "user",
+ content: e.userMessage,
+ image: e.image
+ },
+ {
+ role: "assistant",
+ content: e.botMessage
+ }
+ ])
+
+ setHistoryId("temp")
+ }
+
+ return true
+ }
+
const normalChatMode = async (
message: string,
image: string,
@@ -366,7 +426,10 @@ export const useMessageOption = () => {
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
- currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
+ numPredict:
+ currentChatModelSettings?.numPredict ??
+ userDefaultModelSettings?.numPredict
})
let newMessage: Message[] = []
@@ -465,17 +528,30 @@ export const useMessageOption = () => {
)
}
+ let generationInfo: any | undefined = undefined
+
const chunks = await ollama.stream(
[...applicationChatHistory, humanMessage],
{
- signal: signal
+ signal: signal,
+ callbacks: [
+ {
+ handleLLMEnd(output: any): any {
+ try {
+ generationInfo = output?.generations?.[0]?.[0]?.generationInfo
+ } catch (e) {
+ console.log("handleLLMEnd error", e)
+ }
+ }
+ }
+ ]
}
)
let count = 0
for await (const chunk of chunks) {
- contentToSave += chunk.content
- fullText += chunk.content
+ contentToSave += chunk?.content
+ fullText += chunk?.content
if (count === 0) {
setIsProcessing(true)
}
@@ -498,7 +574,8 @@ export const useMessageOption = () => {
if (message.id === generateMessageId) {
return {
...message,
- message: fullText
+ message: fullText,
+ generationInfo
}
}
return message
@@ -526,7 +603,8 @@ export const useMessageOption = () => {
message,
image,
fullText,
- source: []
+ source: [],
+ generationInfo
})
setIsProcessing(false)
@@ -586,7 +664,10 @@ export const useMessageOption = () => {
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
- currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
+ numPredict:
+ currentChatModelSettings?.numPredict ??
+ userDefaultModelSettings?.numPredict
})
let newMessage: Message[] = []
@@ -677,7 +758,11 @@ export const useMessageOption = () => {
userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
- currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
+ currentChatModelSettings?.numGpu ??
+ userDefaultModelSettings?.numGpu,
+ numPredict:
+ currentChatModelSettings?.numPredict ??
+ userDefaultModelSettings?.numPredict
})
const response = await questionOllama.invoke(promptForQuestion)
query = response.content.toString()
@@ -711,16 +796,29 @@ export const useMessageOption = () => {
const applicationChatHistory = generateHistory(history, selectedModel)
+ let generationInfo: any | undefined = undefined
+
const chunks = await ollama.stream(
[...applicationChatHistory, humanMessage],
{
- signal: signal
+ signal: signal,
+ callbacks: [
+ {
+ handleLLMEnd(output: any): any {
+ try {
+ generationInfo = output?.generations?.[0]?.[0]?.generationInfo
+ } catch (e) {
+ console.log("handleLLMEnd error", e)
+ }
+ }
+ }
+ ]
}
)
let count = 0
for await (const chunk of chunks) {
- contentToSave += chunk.content
- fullText += chunk.content
+ contentToSave += chunk?.content
+ fullText += chunk?.content
if (count === 0) {
setIsProcessing(true)
}
@@ -744,7 +842,8 @@ export const useMessageOption = () => {
return {
...message,
message: fullText,
- sources: source
+ sources: source,
+ generationInfo
}
}
return message
@@ -772,7 +871,8 @@ export const useMessageOption = () => {
message,
image,
fullText,
- source
+ source,
+ generationInfo
})
setIsProcessing(false)
@@ -984,6 +1084,8 @@ export const useMessageOption = () => {
textareaRef,
selectedKnowledge,
setSelectedKnowledge,
- ttsEnabled
+ ttsEnabled,
+ temporaryChat,
+ setTemporaryChat
}
}
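// Note: with `temporaryChat` enabled, the wrappers above skip persistence and
// pin historyId to "temp". An illustrative toggle (component name and antd
// usage are assumptions; the real UI wiring is outside this patch):
import { Switch } from "antd"

const TemporaryChatToggle = ({
  temporaryChat,
  setTemporaryChat
}: {
  temporaryChat: boolean
  setTemporaryChat: (v: boolean) => void
}) => (
  <Switch
    checked={temporaryChat}
    onChange={setTemporaryChat}
    checkedChildren="Temporary"
    unCheckedChildren="Saved"
  />
)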
diff --git a/src/i18n/index.ts b/src/i18n/index.ts
index 97bbf6f8..effddb4a 100644
--- a/src/i18n/index.ts
+++ b/src/i18n/index.ts
@@ -14,6 +14,7 @@ import { de } from "./lang/de";
import { da } from "./lang/da";
import { no } from "./lang/no";
import { sv } from "./lang/sv";
+import { ko } from "./lang/ko";
i18n
@@ -37,7 +38,8 @@ i18n
da: da,
no: no,
de: de,
- sv: sv
+ sv: sv,
+ ko: ko,
},
fallbackLng: "en",
lng: localStorage.getItem("i18nextLng") || "en",
diff --git a/src/i18n/lang/ko.ts b/src/i18n/lang/ko.ts
new file mode 100644
index 00000000..037ce8f2
--- /dev/null
+++ b/src/i18n/lang/ko.ts
@@ -0,0 +1,19 @@
+import option from "@/assets/locale/ko/option.json";
+import playground from "@/assets/locale/ko/playground.json";
+import common from "@/assets/locale/ko/common.json";
+import sidepanel from "@/assets/locale/ko/sidepanel.json";
+import settings from "@/assets/locale/ko/settings.json";
+import knowledge from "@/assets/locale/ko/knowledge.json";
+import chrome from "@/assets/locale/ko/chrome.json";
+import openai from "@/assets/locale/ko/openai.json";
+
+export const ko = {
+ option,
+ playground,
+ common,
+ sidepanel,
+ settings,
+ knowledge,
+ chrome,
+ openai
+}
\ No newline at end of file
diff --git a/src/i18n/support-language.ts b/src/i18n/support-language.ts
index 2c62ad40..273964d9 100644
--- a/src/i18n/support-language.ts
+++ b/src/i18n/support-language.ts
@@ -55,5 +55,9 @@ export const supportLanguage = [
{
value: "sv",
label: "Svenska"
+ },
+ {
+ value: "ko",
+ label: "한국어"
}
]
diff --git a/src/models/embedding.ts b/src/models/embedding.ts
index 03eb663a..0c1026b6 100644
--- a/src/models/embedding.ts
+++ b/src/models/embedding.ts
@@ -1,4 +1,4 @@
-import { getModelInfo, isCustomModel } from "@/db/models"
+import { getModelInfo, isCustomModel, isOllamaModel } from "@/db/models"
import { OllamaEmbeddingsPageAssist } from "./OllamaEmbedding"
import { OAIEmbedding } from "./OAIEmbedding"
import { getOpenAIConfigById } from "@/db/openai"
diff --git a/src/models/index.ts b/src/models/index.ts
index d459e664..135025fa 100644
--- a/src/models/index.ts
+++ b/src/models/index.ts
@@ -1,8 +1,9 @@
-import { getModelInfo, isCustomModel } from "@/db/models"
+import { getModelInfo, isCustomModel, isOllamaModel } from "@/db/models"
import { ChatChromeAI } from "./ChatChromeAi"
import { ChatOllama } from "./ChatOllama"
import { getOpenAIConfigById } from "@/db/openai"
import { ChatOpenAI } from "@langchain/openai"
+import { urlRewriteRuntime } from "@/libs/runtime"
export const pageAssistModel = async ({
model,
@@ -13,7 +14,8 @@ export const pageAssistModel = async ({
topP,
numCtx,
seed,
- numGpu
+ numGpu,
+ numPredict,
}: {
model: string
baseUrl: string
@@ -24,12 +26,13 @@ export const pageAssistModel = async ({
numCtx?: number
seed?: number
numGpu?: number
+ numPredict?: number
}) => {
if (model === "chrome::gemini-nano::page-assist") {
return new ChatChromeAI({
temperature,
- topK
+ topK,
})
}
@@ -41,15 +44,20 @@ export const pageAssistModel = async ({
const modelInfo = await getModelInfo(model)
const providerInfo = await getOpenAIConfigById(modelInfo.provider_id)
+ if (isOllamaModel(model)) {
+ await urlRewriteRuntime(providerInfo.baseUrl || "")
+ }
+
return new ChatOpenAI({
modelName: modelInfo.model_id,
openAIApiKey: providerInfo.apiKey || "temp",
temperature,
topP,
+ maxTokens: numPredict,
configuration: {
apiKey: providerInfo.apiKey || "temp",
baseURL: providerInfo.baseUrl || "",
- }
+ },
}) as any
}
@@ -64,7 +72,8 @@ export const pageAssistModel = async ({
numCtx,
seed,
model,
- numGpu
+ numGpu,
+ numPredict
})
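// Note: `numPredict` becomes the generation cap on both paths: `maxTokens`
// for the OpenAI-compatible client above, and Ollama's num_predict option
// via ChatOllama. An illustrative call (values are examples, not defaults):
const chat = await pageAssistModel({
  model: "llama3.1:8b",
  baseUrl: "http://localhost:11434",
  temperature: 0.7,
  numPredict: 2048 // stop generating after ~2048 tokens
})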
diff --git a/src/models/utils/chrome.ts b/src/models/utils/chrome.ts
index 16e0b8b8..3d426863 100644
--- a/src/models/utils/chrome.ts
+++ b/src/models/utils/chrome.ts
@@ -3,13 +3,19 @@ export const checkChromeAIAvailability = async (): Promise<"readily" | "no" | "after-download"> => {
try {
const ai = (window as any).ai;
- // upcoming version change
+ // current API surface: window.ai.languageModel
+ if (ai?.languageModel?.capabilities) {
+ const capabilities = await ai.languageModel.capabilities();
+ return capabilities?.available ?? "no";
+ }
+
+ // earlier API surface: window.ai.assistant
if (ai?.assistant?.capabilities) {
const capabilities = await ai.assistant.capabilities();
return capabilities?.available ?? "no";
}
- // old version
+ // original API surface: canCreateTextSession
if (ai?.canCreateTextSession) {
const available = await ai.canCreateTextSession();
return available ?? "no";
@@ -33,7 +39,15 @@ export interface AITextSession {
export const createAITextSession = async (data: any): Promise<AITextSession> => {
const ai = (window as any).ai;
- // upcoming version change
+ // current API surface: window.ai.languageModel
+ if (ai?.languageModel?.create) {
+ const session = await ai.languageModel.create({
+ ...data
+ })
+ return session
+ }
+
+ // earlier API surface: window.ai.assistant
if (ai?.assistant?.create) {
const session = await ai.assistant.create({
...data
@@ -41,7 +55,7 @@ export const createAITextSession = async (data: any): Promise<AITextSession> =>
return session
}
- // old version
+ // original API surface: createTextSession
if (ai.createTextSession) {
const session = await ai.createTextSession({
...data
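// Note: the probes above track how Chrome's built-in Prompt API has moved:
// window.ai.languageModel (current), window.ai.assistant (earlier),
// window.ai.canCreateTextSession (original). Illustrative usage, assuming
// the returned session exposes a prompt() method as in Chrome's Prompt API:
const availability = await checkChromeAIAvailability()
if (availability === "readily") {
  const session = await createAITextSession({ temperature: 0.7, topK: 3 })
  const reply = await session.prompt("Summarize this page in one sentence.")
  console.log(reply)
}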
diff --git a/src/store/option.tsx b/src/store/option.tsx
index 8a55501e..49911cd3 100644
--- a/src/store/option.tsx
+++ b/src/store/option.tsx
@@ -65,6 +65,9 @@ type State = {
setSpeechToTextLanguage: (language: string) => void
speechToTextLanguage: string
+
+ temporaryChat: boolean
+ setTemporaryChat: (temporaryChat: boolean) => void
}
export const useStoreMessageOption = create((set) => ({
@@ -102,5 +105,8 @@ export const useStoreMessageOption = create((set) => ({
setSelectedQuickPrompt: (selectedQuickPrompt) => set({ selectedQuickPrompt }),
selectedKnowledge: null,
- setSelectedKnowledge: (selectedKnowledge) => set({ selectedKnowledge })
+ setSelectedKnowledge: (selectedKnowledge) => set({ selectedKnowledge }),
+
+ temporaryChat: false,
+ setTemporaryChat: (temporaryChat) => set({ temporaryChat }),
}))
diff --git a/src/types/message.ts b/src/types/message.ts
index 3be7cdc0..1ec1d5ae 100644
--- a/src/types/message.ts
+++ b/src/types/message.ts
@@ -1,19 +1,20 @@
type WebSearch = {
- search_engine: string
- search_url: string
- search_query: string
- search_results: {
- title: string
- link: string
- }[]
- }
- export type Message = {
- isBot: boolean
- name: string
- message: string
- sources: any[]
- images?: string[]
- search?: WebSearch
- messageType?: string
- id?: string
- }
\ No newline at end of file
+ search_engine: string
+ search_url: string
+ search_query: string
+ search_results: {
+ title: string
+ link: string
+ }[]
+}
+export type Message = {
+ isBot: boolean
+ name: string
+ message: string
+ sources: any[]
+ images?: string[]
+ search?: WebSearch
+ messageType?: string
+ id?: string
+ generationInfo?: any
+}
\ No newline at end of file
diff --git a/src/utils/human-message.tsx b/src/utils/human-message.tsx
index 67123394..9c1200fd 100644
--- a/src/utils/human-message.tsx
+++ b/src/utils/human-message.tsx
@@ -1,43 +1,46 @@
import { isCustomModel } from "@/db/models"
import { HumanMessage, type MessageContent } from "@langchain/core/messages"
-
type HumanMessageType = {
- content: MessageContent,
- model: string
+ content: MessageContent
+ model: string
}
export const humanMessageFormatter = ({ content, model }: HumanMessageType) => {
-
- const isCustom = isCustomModel(model)
-
- if(isCustom) {
- if(typeof content !== 'string') {
- if(content.length > 1) {
- // this means that we need to reformat the image_url
- const newContent: MessageContent = [
- {
- type: "text",
- //@ts-ignore
- text: content[0].text
- },
- {
- type: "image_url",
- image_url: {
- //@ts-ignore
- url: content[1].image_url
- }
- }
- ]
+ const isCustom = isCustomModel(model)
- return new HumanMessage({
- content: newContent
- })
+ if (isCustom) {
+ if (typeof content !== "string") {
+ if (content.length > 1) {
+ // this means that we need to reformat the image_url
+ const newContent: MessageContent = [
+ {
+ type: "text",
+ //@ts-ignore
+ text: content[0].text
+ },
+ {
+ type: "image_url",
+ image_url: {
+ //@ts-ignore
+ url: content[1].image_url
}
- }
+ }
+ ]
+
+ return new HumanMessage({
+ content: newContent
+ })
+ } else {
+ return new HumanMessage({
+ //@ts-ignore
+ content: content[0].text
+ })
+ }
}
-
- return new HumanMessage({
- content,
- })
-}
\ No newline at end of file
+ }
+
+ return new HumanMessage({
+ content
+ })
+}
diff --git a/src/utils/oai-api-providers.ts b/src/utils/oai-api-providers.ts
index 5d651053..40c1e21f 100644
--- a/src/utils/oai-api-providers.ts
+++ b/src/utils/oai-api-providers.ts
@@ -1,9 +1,24 @@
export const OAI_API_PROVIDERS = [
+ {
+ label: "Custom",
+ value: "custom",
+ baseUrl: ""
+ },
{
label: "LM Studio",
value: "lmstudio",
baseUrl: "http://localhost:1234/v1"
},
+ {
+ label: "Llamafile",
+ value: "llamafile",
+ baseUrl: "http://127.0.0.1:8080/v1"
+ },
+ {
+ label: "Ollama",
+ value: "ollama2",
+ baseUrl: "http://localhost:11434/v1"
+ },
{
label: "OpenAI",
value: "openai",
@@ -29,9 +44,5 @@ export const OAI_API_PROVIDERS = [
value: "openrouter",
baseUrl: "https://openrouter.ai/api/v1"
},
- {
- label: "Custom",
- value: "custom",
- baseUrl: ""
- }
+
]
\ No newline at end of file
diff --git a/wxt.config.ts b/wxt.config.ts
index c870e8e4..288a5553 100644
--- a/wxt.config.ts
+++ b/wxt.config.ts
@@ -50,7 +50,7 @@ export default defineConfig({
outDir: "build",
manifest: {
- version: "1.3.3",
+ version: "1.3.4",
name:
process.env.TARGET === "firefox"
? "Page Assist - A Web UI for Local AI Models"