From f61ce71d064bedfc7c198a20ffcc567cc94c917f Mon Sep 17 00:00:00 2001 From: hkjang Date: Wed, 6 Nov 2024 14:32:36 +0900 Subject: [PATCH 01/17] add Koraen localization --- src/assets/locale/ko/chrome.json | 13 + src/assets/locale/ko/common.json | 115 +++++++++ src/assets/locale/ko/knowledge.json | 40 ++++ src/assets/locale/ko/openai.json | 90 +++++++ src/assets/locale/ko/option.json | 12 + src/assets/locale/ko/playground.json | 29 +++ src/assets/locale/ko/settings.json | 345 +++++++++++++++++++++++++++ src/assets/locale/ko/sidepanel.json | 5 + 8 files changed, 649 insertions(+) create mode 100644 src/assets/locale/ko/chrome.json create mode 100644 src/assets/locale/ko/common.json create mode 100644 src/assets/locale/ko/knowledge.json create mode 100644 src/assets/locale/ko/openai.json create mode 100644 src/assets/locale/ko/option.json create mode 100644 src/assets/locale/ko/playground.json create mode 100644 src/assets/locale/ko/settings.json create mode 100644 src/assets/locale/ko/sidepanel.json diff --git a/src/assets/locale/ko/chrome.json b/src/assets/locale/ko/chrome.json new file mode 100644 index 0000000..8f89fc1 --- /dev/null +++ b/src/assets/locale/ko/chrome.json @@ -0,0 +1,13 @@ +{ + "heading": "Chrome AI 설정", + "status": { + "label": "Page Assist에서 Chrome AI 지원을 활성화하거나 비활성화하기" + }, + "error": { + "browser_not_supported": "이 Chrome 버전은 Gemini Nano 모델을 지원하지 않습니다. 버전을 127 이상으로 업데이트해 주세요.", + "ai_not_supported": "설정 `chrome://flags/#prompt-api-for-gemini-nano`가 활성화되지 않았습니다. 활성화해 주세요.", + "ai_not_ready": "Gemini Nano가 아직 준비되지 않았습니다. Chrome 설정을 다시 확인해 주세요.", + "internal_error": "내부 오류가 발생했습니다. 나중에 다시 시도해 주세요." + }, + "errorDescription": "Chrome AI를 사용하려면 현재 Dev 및 Canary 채널에 있는 127 이상의 브라우저 버전이 필요합니다. 지원되는 버전을 다운로드한 후, 다음 단계를 따르세요:\n\n1. `chrome://flags/#prompt-api-for-gemini-nano`에 접속하여 '활성화'를 선택합니다.\n2. `chrome://flags/#optimization-guide-on-device-model`에 접속하여 'EnabledBypassPrefRequirement'를 선택합니다.\n3. `chrome://components`에 접속하여 'Optimization Guide On Device Model'을 검색한 후 '업데이트 확인'을 클릭합니다. 이를 통해 모델이 다운로드됩니다. 설정이 표시되지 않는 경우, 단계 1과 2를 반복하고 브라우저를 재시작해 주세요." +} diff --git a/src/assets/locale/ko/common.json b/src/assets/locale/ko/common.json new file mode 100644 index 0000000..908bb0d --- /dev/null +++ b/src/assets/locale/ko/common.json @@ -0,0 +1,115 @@ +{ + "pageAssist": "페이지 어시스트", + "selectAModel": "모델 선택", + "save": "저장", + "saved": "저장됨", + "cancel": "취소", + "retry": "재시도", + "share": { + "tooltip": { + "share": "공유" + }, + "modal": { + "title": "채팅 링크 공유" + }, + "form": { + "defaultValue": { + "name": "익명", + "title": "제목 없는 채팅" + }, + "title": { + "label": "채팅 제목", + "placeholder": "채팅 제목을 입력하세요", + "required": "채팅 제목은 필수 항목입니다" + }, + "name": { + "label": "이름", + "placeholder": "이름을 입력하세요", + "required": "이름은 필수 항목입니다" + }, + "btn": { + "save": "링크 생성", + "saving": "링크 생성 중..." + } + }, + "notification": { + "successGenerate": "링크가 클립보드에 복사되었습니다", + "failGenerate": "링크 생성에 실패했습니다" + } + }, + "copyToClipboard": "클립보드에 복사", + "webSearch": "웹 검색 중", + "regenerate": "재생성", + "edit": "편집", + "delete": "삭제", + "saveAndSubmit": "저장하고 제출", + "editMessage": { + "placeholder": "메시지를 입력하세요..." 
+ }, + "submit": "제출", + "noData": "데이터가 없습니다", + "noHistory": "채팅 기록이 없습니다", + "chatWithCurrentPage": "현재 페이지에서 채팅", + "beta": "베타", + "tts": "TTS", + "currentChatModelSettings": "현재 채팅 모델 설정", + "modelSettings": { + "label": "모델 설정", + "description": "모든 채팅에 대해 글로벌 모델 옵션을 설정합니다", + "form": { + "keepAlive": { + "label": "Keep Alive", + "help": "요청 후 모델이 메모리에 유지되는 시간을 설정합니다 (기본값: 5분)", + "placeholder": "Keep Alive 기간을 입력하세요 (예: 5분, 10분, 1시간)" + }, + "temperature": { + "label": "온도", + "placeholder": "온도 값을 입력하세요 (예: 0.7, 1.0)" + }, + "numCtx": { + "label": "컨텍스트 수", + "placeholder": "컨텍스트 수를 입력하세요 (기본값: 2048)" + }, + "seed": { + "label": "시드", + "placeholder": "시드 값을 입력하세요 (예: 1234)", + "help": "모델 출력의 재현성" + }, + "topK": { + "label": "Top K", + "placeholder": "Top K 값을 입력하세요 (예: 40, 100)" + }, + "topP": { + "label": "Top P", + "placeholder": "Top P 값을 입력하세요 (예: 0.9, 0.95)" + }, + "numGpu": { + "label": "GPU 수", + "placeholder": "GPU에 할당할 레이어 수를 입력하세요" + }, + "systemPrompt": { + "label": "임시 시스템 프롬프트", + "placeholder": "시스템 프롬프트를 입력하세요", + "help": "현재 채팅에서 시스템 프롬프트를 빠르게 설정하는 방법이며, 선택된 시스템 프롬프트가 있을 경우 이를 덮어씁니다." + } + }, + "advanced": "기타 모델 설정" + }, + "copilot": { + "summary": "요약", + "explain": "설명", + "rephrase": "다르게 표현", + "translate": "번역" + }, + "citations": "인용", + "downloadCode": "코드 다운로드", + "date": { + "pinned": "고정됨", + "today": "오늘", + "yesterday": "어제", + "last7Days": "지난 7일", + "older": "그 이전" + }, + "pin": "고정", + "unpin": "고정 해제" +} diff --git a/src/assets/locale/ko/knowledge.json b/src/assets/locale/ko/knowledge.json new file mode 100644 index 0000000..1bf8536 --- /dev/null +++ b/src/assets/locale/ko/knowledge.json @@ -0,0 +1,40 @@ +{ + "addBtn": "새 지식 추가", + "columns": { + "title": "제목", + "status": "상태", + "embeddings": "임베딩 모델", + "createdAt": "생성일", + "action": "작업" + }, + "expandedColumns": { + "name": "이름" + }, + "confirm": { + "delete": "이 지식을 삭제하시겠습니까?" + }, + "deleteSuccess": "지식이 정상적으로 삭제되었습니다", + "status": { + "pending": "대기 중", + "finished": "완료", + "processing": "처리 중", + "failed": "실패" + }, + "addKnowledge": "지식 추가", + "form": { + "title": { + "label": "지식 제목", + "placeholder": "지식 제목을 입력하세요", + "required": "지식 제목은 필수 항목입니다" + }, + "uploadFile": { + "label": "파일 업로드", + "uploadText": "파일을 여기에 드래그 앤 드롭하거나 클릭하여 업로드하세요", + "uploadHint": "지원되는 파일 형식: .pdf, .csv, .txt", + "required": "파일은 필수 항목입니다" + }, + "submit": "제출", + "success": "지식이 정상적으로 추가되었습니다" + }, + "noEmbeddingModel": "먼저 RAG 설정 페이지에서 임베딩 모델을 추가해 주세요" +} diff --git a/src/assets/locale/ko/openai.json b/src/assets/locale/ko/openai.json new file mode 100644 index 0000000..13bc431 --- /dev/null +++ b/src/assets/locale/ko/openai.json @@ -0,0 +1,90 @@ +{ + "settings": "OpenAI 호환 API", + "heading": "OpenAI 호환 API", + "subheading": "여기에서 OpenAI API 호환 공급자를 관리하고 설정할 수 있습니다.", + "addBtn": "공급자 추가", + "table": { + "name": "공급자 이름", + "baseUrl": "기본 URL", + "actions": "작업" + }, + "modal": { + "titleAdd": "새 공급자 추가", + "name": { + "label": "공급자 이름", + "required": "공급자 이름은 필수 항목입니다.", + "placeholder": "공급자 이름 입력" + }, + "baseUrl": { + "label": "기본 URL", + "help": "OpenAI API 공급자의 기본 URL 예시: (http://localhost:1234/v1)", + "required": "기본 URL은 필수 항목입니다.", + "placeholder": "기본 URL 입력" + }, + "apiKey": { + "label": "API 키", + "required": "API 키는 필수 항목입니다.", + "placeholder": "API 키 입력" + }, + "submit": "저장", + "update": "업데이트", + "deleteConfirm": "이 공급자를 삭제하시겠습니까?", + "model": { + "title": "모델 목록", + "subheading": "이 공급자에서 사용하고자 하는 챗 모델을 선택하세요.", + "success": "새로운 모델이 정상적으로 추가되었습니다." 
+ }, + "tipLMStudio": "Page Assist는 LM Studio에 로드된 모델을 자동으로 가져옵니다. 수동 추가가 필요하지 않습니다." + }, + "addSuccess": "공급자가 정상적으로 추가되었습니다.", + "deleteSuccess": "공급자가 정상적으로 삭제되었습니다.", + "updateSuccess": "공급자가 정상적으로 업데이트되었습니다.", + "delete": "삭제", + "edit": "편집", + "newModel": "공급자에 모델 추가", + "noNewModel": "LMStudio의 경우 동적으로 가져옵니다. 수동 추가는 필요하지 않습니다.", + "searchModel": "모델 검색", + "selectAll": "모두 선택", + "save": "저장", + "saving": "저장 중...", + "manageModels": { + "columns": { + "name": "모델 이름", + "model_type": "모델 타입", + "model_id": "모델 ID", + "provider": "공급자 이름", + "actions": "작업" + }, + "tooltip": { + "delete": "삭제" + }, + "confirm": { + "delete": "이 모델을 삭제하시겠습니까?" + }, + "modal": { + "title": "사용자 정의 모델 추가", + "form": { + "name": { + "label": "모델 ID", + "placeholder": "llama3.2", + "required": "모델 ID는 필수 항목입니다." + }, + "provider": { + "label": "공급자", + "placeholder": "공급자 선택", + "required": "공급자는 필수 항목입니다." + }, + "type": { + "label": "모델 타입" + } + } + } + }, + "noModelFound": "모델을 찾을 수 없습니다. 올바른 기본 URL과 API 키를 가진 공급자가 추가되었는지 확인하세요.", + "radio": { + "chat": "챗 모델", + "embedding": "임베딩 모델", + "chatInfo": "는 챗 완료 및 대화 생성에 사용됩니다", + "embeddingInfo": "는 RAG 및 기타 의미 검색 관련 작업에 사용됩니다." + } +} diff --git a/src/assets/locale/ko/option.json b/src/assets/locale/ko/option.json new file mode 100644 index 0000000..21a5048 --- /dev/null +++ b/src/assets/locale/ko/option.json @@ -0,0 +1,12 @@ +{ + "newChat": "새 채팅", + "selectAPrompt": "프롬프트 선택", + "githubRepository": "GitHub 리포지토리", + "settings": "설정", + "sidebarTitle": "채팅 기록", + "error": "오류", + "somethingWentWrong": "문제가 발생했습니다", + "validationSelectModel": "계속하려면 모델을 선택하세요", + "deleteHistoryConfirmation": "이 기록을 삭제하시겠습니까?", + "editHistoryTitle": "새 제목 입력" +} diff --git a/src/assets/locale/ko/playground.json b/src/assets/locale/ko/playground.json new file mode 100644 index 0000000..082ad5d --- /dev/null +++ b/src/assets/locale/ko/playground.json @@ -0,0 +1,29 @@ +{ + "ollamaState": { + "searching": "Ollama 검색 중 🦙", + "running": "Ollama 실행 중 🦙", + "notRunning": "Ollama에 연결할 수 없습니다 🦙", + "connectionError": "연결 오류가 발생한 것 같습니다. 문제 해결에 대한 자세한 내용은 문서를 참조하세요." + }, + "formError": { + "noModel": "모델을 선택하세요", + "noEmbeddingModel": "설정 > RAG 페이지에서 임베딩 모델을 설정하세요" + }, + "form": { + "textarea": { + "placeholder": "메시지를 입력하세요..." 
+ }, + "webSearch": { + "on": "켜짐", + "off": "꺼짐" + } + }, + "tooltip": { + "searchInternet": "인터넷 검색", + "speechToText": "음성 입력", + "uploadImage": "이미지 업로드", + "stopStreaming": "스트리밍 중지", + "knowledge": "지식" + }, + "sendWhenEnter": "Enter 키를 누르면 전송" +} diff --git a/src/assets/locale/ko/settings.json b/src/assets/locale/ko/settings.json new file mode 100644 index 0000000..072c7f6 --- /dev/null +++ b/src/assets/locale/ko/settings.json @@ -0,0 +1,345 @@ +{ + "generalSettings": { + "title": "일반 설정", + "settings": { + "heading": "웹 UI 설정", + "speechRecognitionLang": { + "label": "음성 인식 언어", + "placeholder": "언어 선택" + }, + "language": { + "label": "언어", + "placeholder": "언어 선택" + }, + "darkMode": { + "label": "테마 변경", + "options": { + "light": "라이트", + "dark": "다크" + } + }, + "searchMode": { + "label": "간편 인터넷 검색 실행" + }, + "copilotResumeLastChat": { + "label": "사이드 패널을 열 때 마지막 채팅 재개 (Copilot)" + }, + "hideCurrentChatModelSettings": { + "label": "현재 채팅 모델 설정 숨기기" + }, + "restoreLastChatModel": { + "label": "이전 채팅에서 마지막 사용한 모델 복원" + }, + "sendNotificationAfterIndexing": { + "label": "지식 베이스 처리 완료 후 알림 전송" + }, + "generateTitle": { + "label": "AI로 제목 생성" + } + }, + "sidepanelRag": { + "heading": "웹사이트와의 Copilot 채팅 설정", + "ragEnabled": { + "label": "벡터 임베딩을 사용하여 웹사이트와 채팅" + }, + "maxWebsiteContext": { + "label": "일반 모드 웹사이트 콘텐츠 크기", + "placeholder": "콘텐츠 크기 (기본값 4028)" + } + }, + "webSearch": { + "heading": "웹 검색 관리", + "searchMode": { + "label": "간편한 인터넷 검색 실행" + }, + "provider": { + "label": "검색 엔진", + "placeholder": "검색 엔진 선택" + }, + "totalSearchResults": { + "label": "총 검색 결과", + "placeholder": "총 검색 결과 입력" + }, + "visitSpecificWebsite": { + "label": "메시지에 언급된 웹사이트 방문" + } + }, + "system": { + "heading": "시스템 설정", + "deleteChatHistory": { + "label": "채팅 기록 삭제", + "button": "삭제", + "confirm": "채팅 기록을 삭제하시겠습니까? 이 작업은 되돌릴 수 없습니다." + }, + "export": { + "label": "채팅 기록, 지식 베이스, 프롬프트 내보내기", + "button": "데이터 내보내기", + "success": "내보내기 성공" + }, + "import": { + "label": "채팅 기록, 지식 베이스, 프롬프트 가져오기", + "button": "데이터 가져오기", + "success": "가져오기 성공", + "error": "가져오기 오류" + } + }, + "tts": { + "heading": "텍스트 음성 변환 설정", + "ttsEnabled": { + "label": "텍스트 음성 변환 활성화" + }, + "ttsProvider": { + "label": "텍스트 음성 변환 제공자", + "placeholder": "제공자 선택" + }, + "ttsVoice": { + "label": "텍스트 음성 변환 음성", + "placeholder": "음성 선택" + }, + "ssmlEnabled": { + "label": "SSML (Speech Synthesis Markup Language) 활성화" + } + } + }, + "manageModels": { + "title": "모델 관리", + "addBtn": "새 모델 추가", + "columns": { + "name": "이름", + "digest": "다이제스트", + "modifiedAt": "수정 일시", + "size": "크기", + "actions": "동작" + }, + "expandedColumns": { + "parentModel": "상위 모델", + "format": "형식", + "family": "패밀리", + "parameterSize": "파라미터 크기", + "quantizationLevel": "양자화 수준" + }, + "tooltip": { + "delete": "모델 삭제", + "repull": "모델 다시 가져오기" + }, + "confirm": { + "delete": "이 모델을 정말 삭제하시겠습니까?", + "repull": "이 모델을 정말 다시 가져오시겠습니까?" + }, + "modal": { + "title": "새 모델 추가", + "placeholder": "모델 이름 입력", + "pull": "모델 가져오기" + }, + "notification": { + "pullModel": "모델 가져오는 중", + "pullModelDescription": "{{modelName}} 모델을 가져오는 중입니다. 자세한 내용은 확장 기능 아이콘을 확인하세요.", + "success": "성공", + "error": "오류", + "successDescription": "모델 가져오기가 완료되었습니다", + "successDeleteDescription": "모델 삭제가 완료되었습니다", + "someError": "문제가 발생했습니다. 나중에 다시 시도해 주세요." 
+ } + }, + "managePrompts": { + "title": "프롬프트 관리", + "addBtn": "새 프롬프트 추가", + "option1": "일반", + "option2": "RAG", + "questionPrompt": "질문 프롬프트", + "columns": { + "title": "제목", + "prompt": "프롬프트", + "type": "프롬프트 유형", + "actions": "동작" + }, + "systemPrompt": "시스템 프롬프트", + "quickPrompt": "퀵 프롬프트", + "tooltip": { + "delete": "프롬프트 삭제", + "edit": "프롬프트 수정" + }, + "confirm": { + "delete": "이 프롬프트를 정말 삭제하시겠습니까? 이 작업은 되돌릴 수 없습니다." + }, + "modal": { + "addTitle": "새 프롬프트 추가", + "editTitle": "프롬프트 수정" + }, + "segmented": { + "custom": "커스텀 프롬프트", + "copilot": "Copilot 프롬프트" + }, + "form": { + "title": { + "label": "제목", + "placeholder": "훌륭한 프롬프트", + "required": "제목을 입력하세요" + }, + "prompt": { + "label": "프롬프트", + "placeholder": "프롬프트 입력", + "required": "프롬프트를 입력하세요", + "help": "프롬프트 내에서 {key}를 변수로 사용할 수 있습니다.", + "missingTextPlaceholder": "프롬프트에 {text} 변수가 없습니다. 추가해 주세요." + }, + "isSystem": { + "label": "시스템 프롬프트" + }, + "btnSave": { + "saving": "프롬프트 추가 중...", + "save": "프롬프트 추가" + }, + "btnEdit": { + "saving": "프롬프트 업데이트 중...", + "save": "프롬프트 업데이트" + } + }, + "notification": { + "addSuccess": "프롬프트가 추가되었습니다", + "addSuccessDesc": "프롬프트가 정상적으로 추가되었습니다", + "error": "오류", + "someError": "문제가 발생했습니다. 나중에 다시 시도해 주세요.", + "updatedSuccess": "프롬프트가 업데이트되었습니다", + "updatedSuccessDesc": "프롬프트가 정상적으로 업데이트되었습니다", + "deletedSuccess": "프롬프트가 삭제되었습니다", + "deletedSuccessDesc": "프롬프트가 정상적으로 삭제되었습니다" + } + }, + "manageShare": { + "title": "공유 관리", + "heading": "페이지 공유 URL 설정", + "form": { + "url": { + "label": "페이지 공유 URL", + "placeholder": "페이지 공유 URL 입력", + "required": "페이지 공유 URL을 입력해 주세요!", + "help": "개인정보 보호를 위해 페이지 공유를 자체 호스팅하고, 해당 URL을 여기에 입력할 수 있습니다. 자세히 보기" + } + }, + "webshare": { + "heading": "웹 공유", + "columns": { + "title": "제목", + "url": "URL", + "actions": "동작" + }, + "tooltip": { + "delete": "공유 삭제" + }, + "confirm": { + "delete": "이 공유를 정말 삭제하시겠습니까? 이 작업은 되돌릴 수 없습니다." + }, + "label": "페이지 공유 관리", + "description": "페이지 공유 기능을 활성화 또는 비활성화" + }, + "notification": { + "pageShareSuccess": "페이지 공유 URL이 정상적으로 업데이트되었습니다", + "someError": "문제가 발생했습니다. 나중에 다시 시도해 주세요.", + "webShareDeleteSuccess": "웹 공유가 정상적으로 삭제되었습니다" + } + }, + "ollamaSettings": { + "title": "Ollama 설정", + "heading": "Ollama 설정하기", + "settings": { + "ollamaUrl": { + "label": "Ollama URL", + "placeholder": "Ollama URL 입력" + }, + "advanced": { + "label": "Ollama URL 고급 설정", + "urlRewriteEnabled": { + "label": "사용자 지정 Origin URL 활성화 또는 비활성화" + }, + "rewriteUrl": { + "label": "사용자 지정 Origin URL", + "placeholder": "사용자 지정 Origin URL 입력" + }, + "headers": { + "label": "사용자 지정 헤더", + "add": "헤더 추가", + "key": { + "label": "헤더 키", + "placeholder": "인증" + }, + "value": { + "label": "헤더 값", + "placeholder": "베어러 토큰" + } + }, + "help": "Page Assist에서 Ollama 연결에 문제가 있는 경우 사용자 지정 Origin URL을 설정할 수 있습니다. 설정에 대한 자세한 내용은 여기를 클릭하세요." 
+ } + } + }, + "manageSearch": { + "title": "웹 검색 관리", + "heading": "웹 검색 설정하기" + }, + "about": { + "title": "소개", + "heading": "소개", + "chromeVersion": "Page Assist 버전", + "ollamaVersion": "Ollama 버전", + "support": "Page Assist 프로젝트는 다음 플랫폼에서 기부나 후원을 통해 지원할 수 있습니다:", + "koFi": "Ko-fi로 후원하기", + "githubSponsor": "GitHub에서 후원하기", + "githubRepo": "GitHub 저장소" + }, + "manageKnowledge": { + "title": "지식 관리", + "heading": "지식 베이스 구성하기" + }, + "rag": { + "title": "RAG 설정", + "ragSettings": { + "label": "RAG 설정", + "model": { + "label": "임베딩 모델", + "required": "모델을 선택해주세요", + "help": "`nomic-embed-text`와 같은 임베딩 모델 사용을 강력히 권장합니다.", + "placeholder": "모델 선택" + }, + "chunkSize": { + "label": "청크 크기", + "placeholder": "청크 크기 입력", + "required": "청크 크기를 입력해주세요" + }, + "chunkOverlap": { + "label": "청크 오버랩", + "placeholder": "청크 오버랩 입력", + "required": "청크 오버랩을 입력해주세요" + }, + "totalFilePerKB": { + "label": "지식 베이스 기본 파일 업로드 제한", + "placeholder": "기본 파일 업로드 제한 입력 (예: 10)", + "required": "기본 파일 업로드 제한을 입력해주세요" + }, + "noOfRetrievedDocs": { + "label": "검색 문서 수", + "placeholder": "검색 문서 수 입력", + "required": "검색 문서 수를 입력해주세요" + } + }, + "prompt": { + "label": "RAG 프롬프트 설정", + "option1": "일반", + "option2": "웹", + "alert": "여기서 시스템 프롬프트를 설정하는 것은 더 이상 권장되지 않습니다. 프롬프트 추가 및 편집은 '프롬프트 관리' 섹션을 이용해주세요. 이 섹션은 향후 릴리스에서 제거될 예정입니다.", + "systemPrompt": "시스템 프롬프트", + "systemPromptPlaceholder": "시스템 프롬프트 입력", + "webSearchPrompt": "웹 검색 프롬프트", + "webSearchPromptHelp": "프롬프트에서 `{search_results}`를 제거하지 마세요.", + "webSearchPromptError": "웹 검색 프롬프트를 입력해주세요", + "webSearchPromptPlaceholder": "웹 검색 프롬프트 입력", + "webSearchFollowUpPrompt": "웹 검색 후속 프롬프트", + "webSearchFollowUpPromptHelp": "프롬프트에서 `{chat_history}`와 `{question}`를 제거하지 마세요.", + "webSearchFollowUpPromptError": "웹 검색 후속 프롬프트를 입력해주세요!", + "webSearchFollowUpPromptPlaceholder": "웹 검색 후속 프롬프트" + } + }, + "chromeAiSettings": { + "title": "Chrome AI 설정" + } +} + diff --git a/src/assets/locale/ko/sidepanel.json b/src/assets/locale/ko/sidepanel.json new file mode 100644 index 0000000..b7a5eea --- /dev/null +++ b/src/assets/locale/ko/sidepanel.json @@ -0,0 +1,5 @@ +{ + "tooltip": { + "embed": "페이지를 임베드하는 데 몇 분이 걸릴 수 있습니다. 잠시만 기다려 주세요..." + } +} From 9f383a81b64c4faf9b5bf5b66a28621ffed89818 Mon Sep 17 00:00:00 2001 From: n4ze3m Date: Sat, 9 Nov 2024 15:17:59 +0530 Subject: [PATCH 02/17] feat: Add generation info to messages This commit introduces a new feature that displays generation information for each message in the chat. The generation info is displayed in a popover and includes details about the model used, the prompt, and other relevant information. This helps users understand how their messages were generated and troubleshoot any issues that may arise. The generation info is retrieved from the LLM response and is stored in the database alongside other message details. This commit also includes translations for the generation info label in all supported languages. 
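For reference, the metadata is captured with a LangChain callback attached to
the streaming call; the pattern, repeated across useMessage.tsx and
useMessageOption.tsx in this diff, is roughly:

    let generationInfo: any | undefined = undefined

    const chunks = await ollama.stream(
      [...applicationChatHistory, humanMessage],
      {
        signal,
        callbacks: [
          {
            // Runs once the model finishes; Ollama attaches metrics such as
            // eval_count and eval_duration to the final generation.
            handleLLMEnd(output: any): any {
              try {
                generationInfo = output?.generations?.[0][0]?.generationInfo
              } catch (e) {
                console.log("handleLLMEnd error", e)
              }
            }
          }
        ]
      }
    )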
--- src/assets/locale/da/common.json | 3 +- src/assets/locale/de/common.json | 3 +- src/assets/locale/en/common.json | 3 +- src/assets/locale/es/common.json | 3 +- src/assets/locale/fa/common.json | 3 +- src/assets/locale/fr/common.json | 3 +- src/assets/locale/it/common.json | 3 +- src/assets/locale/ja-JP/common.json | 3 +- src/assets/locale/ko/common.json | 3 +- src/assets/locale/ml/common.json | 4 +- src/assets/locale/no/common.json | 3 +- src/assets/locale/pt-BR/common.json | 3 +- src/assets/locale/ru/common.json | 3 +- src/assets/locale/sv/common.json | 3 +- src/assets/locale/zh/common.json | 3 +- .../Common/Playground/GenerationInfo.tsx | 65 +++++++++++++ src/components/Common/Playground/Message.tsx | 17 +++- .../Option/Playground/PlaygroundChat.tsx | 1 + src/components/Sidepanel/Chat/body.tsx | 1 + src/db/index.ts | 7 +- src/hooks/chat-helper/index.ts | 15 ++- src/hooks/useMessage.tsx | 91 ++++++++++++++++--- src/hooks/useMessageOption.tsx | 63 +++++++++++-- src/models/index.ts | 2 +- src/types/message.ts | 37 ++++---- wxt.config.ts | 2 +- 26 files changed, 283 insertions(+), 64 deletions(-) create mode 100644 src/components/Common/Playground/GenerationInfo.tsx diff --git a/src/assets/locale/da/common.json b/src/assets/locale/da/common.json index 4d0de2d..881dab7 100644 --- a/src/assets/locale/da/common.json +++ b/src/assets/locale/da/common.json @@ -112,5 +112,6 @@ "older": "Ældre" }, "pin": "Fastgør", - "unpin": "Frigør" + "unpin": "Frigør", + "generationInfo": "Genererings Info" } \ No newline at end of file diff --git a/src/assets/locale/de/common.json b/src/assets/locale/de/common.json index 739e0dd..9a69b22 100644 --- a/src/assets/locale/de/common.json +++ b/src/assets/locale/de/common.json @@ -112,5 +112,6 @@ "older": "Älter" }, "pin": "Anheften", - "unpin": "Losheften" + "unpin": "Losheften", + "generationInfo": "Generierungsinformationen" } \ No newline at end of file diff --git a/src/assets/locale/en/common.json b/src/assets/locale/en/common.json index f5b9e39..a7a3ecc 100644 --- a/src/assets/locale/en/common.json +++ b/src/assets/locale/en/common.json @@ -116,5 +116,6 @@ "older": "Older" }, "pin": "Pin", - "unpin": "Unpin" + "unpin": "Unpin", + "generationInfo": "Generation Info" } diff --git a/src/assets/locale/es/common.json b/src/assets/locale/es/common.json index 65590aa..cf5ccf8 100644 --- a/src/assets/locale/es/common.json +++ b/src/assets/locale/es/common.json @@ -111,5 +111,6 @@ "older": "Más antiguo" }, "pin": "Fijar", - "unpin": "Desfijar" + "unpin": "Desfijar", + "generationInfo": "Información de Generación" } \ No newline at end of file diff --git a/src/assets/locale/fa/common.json b/src/assets/locale/fa/common.json index 4adb307..6abb70a 100644 --- a/src/assets/locale/fa/common.json +++ b/src/assets/locale/fa/common.json @@ -105,5 +105,6 @@ "older": "قدیمی‌تر" }, "pin": "پین کردن", - "unpin": "حذف پین" + "unpin": "حذف پین", + "generationInfo": "اطلاعات تولید" } \ No newline at end of file diff --git a/src/assets/locale/fr/common.json b/src/assets/locale/fr/common.json index 8dbe609..0775303 100644 --- a/src/assets/locale/fr/common.json +++ b/src/assets/locale/fr/common.json @@ -111,5 +111,6 @@ "older": "Plus ancien" }, "pin": "Épingler", - "unpin": "Désépingler" + "unpin": "Désépingler", + "generationInfo": "Informations de génération" } \ No newline at end of file diff --git a/src/assets/locale/it/common.json b/src/assets/locale/it/common.json index 80ede68..102b6c8 100644 --- a/src/assets/locale/it/common.json +++ b/src/assets/locale/it/common.json @@ 
-111,5 +111,6 @@ "older": "Più Vecchi" }, "pin": "Fissa", - "unpin": "Rimuovi" + "unpin": "Rimuovi", + "generationInfo": "Informazioni sulla Generazione" } \ No newline at end of file diff --git a/src/assets/locale/ja-JP/common.json b/src/assets/locale/ja-JP/common.json index df20028..6f38578 100644 --- a/src/assets/locale/ja-JP/common.json +++ b/src/assets/locale/ja-JP/common.json @@ -111,5 +111,6 @@ "older": "それ以前" }, "pin": "固定", - "unpin": "固定解除" + "unpin": "固定解除", + "generationInfo": "生成情報" } \ No newline at end of file diff --git a/src/assets/locale/ko/common.json b/src/assets/locale/ko/common.json index 908bb0d..e59e785 100644 --- a/src/assets/locale/ko/common.json +++ b/src/assets/locale/ko/common.json @@ -111,5 +111,6 @@ "older": "그 이전" }, "pin": "고정", - "unpin": "고정 해제" + "unpin": "고정 해제", + "generationInfo": "생성 정보" } diff --git a/src/assets/locale/ml/common.json b/src/assets/locale/ml/common.json index bb1149a..ed6988a 100644 --- a/src/assets/locale/ml/common.json +++ b/src/assets/locale/ml/common.json @@ -110,5 +110,7 @@ "older": "പഴയത്" }, "pin": "പിൻ ചെയ്യുക", - "unpin": "അൺപിൻ ചെയ്യുക" + "unpin": "അൺപിൻ ചെയ്യുക", + "generationInfo": "ജനറേഷൻ വിവരങ്ങൾ" + } \ No newline at end of file diff --git a/src/assets/locale/no/common.json b/src/assets/locale/no/common.json index 6665280..b95ffce 100644 --- a/src/assets/locale/no/common.json +++ b/src/assets/locale/no/common.json @@ -112,5 +112,6 @@ "older": "Eldre" }, "pin": "Fest", - "unpin": "Løsne" + "unpin": "Løsne", + "generationInfo": "Generasjonsinformasjon" } \ No newline at end of file diff --git a/src/assets/locale/pt-BR/common.json b/src/assets/locale/pt-BR/common.json index ba4f252..a2f1a41 100644 --- a/src/assets/locale/pt-BR/common.json +++ b/src/assets/locale/pt-BR/common.json @@ -111,5 +111,6 @@ "older": "Mais Antigos" }, "pin": "Fixar", - "unpin": "Desafixar" + "unpin": "Desafixar", + "generationInfo": "Informações de Geração" } \ No newline at end of file diff --git a/src/assets/locale/ru/common.json b/src/assets/locale/ru/common.json index b619def..04b54de 100644 --- a/src/assets/locale/ru/common.json +++ b/src/assets/locale/ru/common.json @@ -111,5 +111,6 @@ "older": "Ранее" }, "pin": "Закрепить", - "unpin": "Открепить" + "unpin": "Открепить", + "generationInfo": "Информация о генерации" } \ No newline at end of file diff --git a/src/assets/locale/sv/common.json b/src/assets/locale/sv/common.json index 3127267..b0df981 100644 --- a/src/assets/locale/sv/common.json +++ b/src/assets/locale/sv/common.json @@ -116,5 +116,6 @@ "older": "Äldre" }, "pin": "Fäst", - "unpin": "Ta bort fäst" + "unpin": "Ta bort fäst", + "generationInfo": "Generationsinformation" } diff --git a/src/assets/locale/zh/common.json b/src/assets/locale/zh/common.json index cd58e05..7fb9f9d 100644 --- a/src/assets/locale/zh/common.json +++ b/src/assets/locale/zh/common.json @@ -111,5 +111,6 @@ "older": "更早" }, "pin": "置顶", - "unpin": "取消置顶" + "unpin": "取消置顶", + "generationInfo": "生成信息" } \ No newline at end of file diff --git a/src/components/Common/Playground/GenerationInfo.tsx b/src/components/Common/Playground/GenerationInfo.tsx new file mode 100644 index 0000000..4e34710 --- /dev/null +++ b/src/components/Common/Playground/GenerationInfo.tsx @@ -0,0 +1,65 @@ +type GenerationMetrics = { + total_duration?: number + load_duration?: number + prompt_eval_count?: number + prompt_eval_duration?: number + eval_count?: number + eval_duration?: number + context?: string + response?: string +} + +type Props = { + generationInfo: GenerationMetrics +} + 
+export const GenerationInfo = ({ generationInfo }: Props) => {
+  if (!generationInfo) return null
+
+  const calculateTokensPerSecond = (
+    evalCount?: number,
+    evalDuration?: number
+  ) => {
+    if (!evalCount || !evalDuration) return 0
+    return (evalCount / evalDuration) * 1e9
+  }
+
+  const formatDuration = (nanoseconds?: number) => {
+    if (!nanoseconds) return "0ms"
+    const ms = nanoseconds / 1e6
+    if (ms < 1) return `${ms.toFixed(3)}ms`
+    if (ms < 1000) return `${Math.round(ms)}ms`
+    return `${(ms / 1000).toFixed(2)}s`
+  }
+
+  const metricsToDisplay = {
+    ...generationInfo,
+    ...(generationInfo?.eval_count && generationInfo?.eval_duration
+      ? {
+          tokens_per_second: calculateTokensPerSecond(
+            generationInfo.eval_count,
+            generationInfo.eval_duration
+          ).toFixed(2)
+        }
+      : {})
+  }
+
+  return (
+    <div>
+      <div>
+        {Object.entries(metricsToDisplay)
+          .filter(([key]) => key !== "model")
+          .map(([key, value]) => (
+            <div key={key}>
+              <div>{key}</div>
+              <div>
+                {key.includes("duration")
+                  ? formatDuration(value as number)
+                  : String(value)}
+              </div>
+            </div>
+          ))}
+      </div>
+    </div>
+  )
+}
diff --git a/src/components/Common/Playground/Message.tsx b/src/components/Common/Playground/Message.tsx
index 0c6299c..925fe4c 100644
--- a/src/components/Common/Playground/Message.tsx
+++ b/src/components/Common/Playground/Message.tsx
@@ -1,10 +1,11 @@
 import Markdown from "../../Common/Markdown"
 import React from "react"
-import { Tag, Image, Tooltip, Collapse } from "antd"
+import { Tag, Image, Tooltip, Collapse, Popover } from "antd"
 import { WebSearch } from "./WebSearch"
 import {
   CheckIcon,
   ClipboardIcon,
+  InfoIcon,
   Pen,
   PlayIcon,
   RotateCcw,
@@ -16,6 +17,7 @@ import { MessageSource } from "./MessageSource"
 import { useTTS } from "@/hooks/useTTS"
 import { tagColors } from "@/utils/color"
 import { removeModelSuffix } from "@/db/models"
+import { GenerationInfo } from "./GenerationInfo"
 
 type Props = {
   message: string
@@ -37,6 +39,7 @@ type Props = {
   hideEditAndRegenerate?: boolean
   onSourceClick?: (source: any) => void
   isTTSEnabled?: boolean
+  generationInfo?: any
 }
 
 export const PlaygroundMessage = (props: Props) => {
@@ -206,6 +209,18 @@ export const PlaygroundMessage = (props: Props) => {
                 )}
 
+                {props.generationInfo && (
+                  <Popover
+                    content={
+                      <GenerationInfo generationInfo={props.generationInfo} />
+                    }
+                    title={t("generationInfo")}>
+                    <button>
+                      <InfoIcon />
+                    </button>
+                  </Popover>
+                )}
+
                 {!props.hideEditAndRegenerate &&
                   props.currentMessageIndex === props.totalMessages - 1 && (
diff --git a/src/components/Option/Playground/PlaygroundChat.tsx b/src/components/Option/Playground/PlaygroundChat.tsx
index 5f2adfe..aaf3462 100644
--- a/src/components/Option/Playground/PlaygroundChat.tsx
+++ b/src/components/Option/Playground/PlaygroundChat.tsx
@@ -54,6 +54,7 @@ export const PlaygroundChat = () => {
               setIsSourceOpen(true)
             }}
             isTTSEnabled={ttsEnabled}
+            generationInfo={message?.generationInfo}
           />
         ))}
         {messages.length > 0 && (
diff --git a/src/components/Sidepanel/Chat/body.tsx b/src/components/Sidepanel/Chat/body.tsx
index 61d9071..f9f1407 100644
--- a/src/components/Sidepanel/Chat/body.tsx
+++ b/src/components/Sidepanel/Chat/body.tsx
@@ -47,6 +47,7 @@ export const SidePanelBody = () => {
             setIsSourceOpen(true)
           }}
           isTTSEnabled={ttsEnabled}
+          generationInfo={message?.generationInfo}
         />
       ))}
     </div>
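A note on the arithmetic in GenerationInfo above: Ollama reports all durations
in nanoseconds, so tokens per second is derived as
(eval_count / eval_duration) * 1e9. A hypothetical payload with illustrative
values (not real model output):

    const info = {
      total_duration: 5200000000, // 5.2 s end to end, in nanoseconds
      load_duration: 500000000,   // 0.5 s spent loading the model
      eval_count: 120,            // tokens generated
      eval_duration: 2000000000   // 2 s spent generating tokens
    }
    // tokens_per_second = (120 / 2000000000) * 1e9 = 60.00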
diff --git a/src/db/index.ts b/src/db/index.ts index 4a77553..68b3b02 100644 --- a/src/db/index.ts +++ b/src/db/index.ts @@ -33,6 +33,7 @@ type Message = { search?: WebSearch createdAt: number messageType?: string + generationInfo?: any } type Webshare = { @@ -254,7 +255,8 @@ export const saveMessage = async ( images: string[], source?: any[], time?: number, - message_type?: string + message_type?: string, + generationInfo?: any ) => { const id = generateID() let createdAt = Date.now() @@ -270,7 +272,8 @@ export const saveMessage = async ( images, createdAt, sources: source, - messageType: message_type + messageType: message_type, + generationInfo: generationInfo } const db = new PageAssitDatabase() await db.addMessage(message) diff --git a/src/hooks/chat-helper/index.ts b/src/hooks/chat-helper/index.ts index 18f89be..3f725c0 100644 --- a/src/hooks/chat-helper/index.ts +++ b/src/hooks/chat-helper/index.ts @@ -118,7 +118,7 @@ export const saveMessageOnSuccess = async ({ fullText, source, message_source = "web-ui", - message_type + message_type, generationInfo }: { historyId: string | null setHistoryId: (historyId: string) => void @@ -130,6 +130,7 @@ export const saveMessageOnSuccess = async ({ source: any[] message_source?: "copilot" | "web-ui", message_type?: string + generationInfo?: any }) => { if (historyId) { if (!isRegenerate) { @@ -141,7 +142,8 @@ export const saveMessageOnSuccess = async ({ [image], [], 1, - message_type + message_type, + generationInfo ) } await saveMessage( @@ -152,7 +154,8 @@ export const saveMessageOnSuccess = async ({ [], source, 2, - message_type + message_type, + generationInfo ) await setLastUsedChatModel(historyId, selectedModel!) } else { @@ -166,7 +169,8 @@ export const saveMessageOnSuccess = async ({ [image], [], 1, - message_type + message_type, + generationInfo ) await saveMessage( newHistoryId.id, @@ -176,7 +180,8 @@ export const saveMessageOnSuccess = async ({ [], source, 2, - message_type + message_type, + generationInfo ) setHistoryId(newHistoryId.id) await setLastUsedChatModel(newHistoryId.id, selectedModel!) 
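Every hook changed below threads the captured metadata through this helper; an
abbreviated call (argument names as in the signature above) looks like:

    await saveMessageOnSuccess({
      historyId,
      setHistoryId,
      isRegenerate,
      selectedModel,
      message,
      image,
      fullText,
      source,
      // captured in handleLLMEnd and persisted via saveMessage()
      generationInfo
    })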
diff --git a/src/hooks/useMessage.tsx b/src/hooks/useMessage.tsx index c85324a..d9a17ac 100644 --- a/src/hooks/useMessage.tsx +++ b/src/hooks/useMessage.tsx @@ -328,10 +328,25 @@ export const useMessage = () => { const applicationChatHistory = generateHistory(history, selectedModel) + let generationInfo: any | undefined = undefined + const chunks = await ollama.stream( [...applicationChatHistory, humanMessage], { - signal: signal + signal: signal, + callbacks: [ + { + handleLLMEnd( + output: any, + ): any { + try { + generationInfo = output?.generations?.[0][0]?.generationInfo + } catch (e) { + console.log("handleLLMEnd error", e) + } + } + } + ] } ) let count = 0 @@ -361,7 +376,8 @@ export const useMessage = () => { return { ...message, message: fullText, - sources: source + sources: source, + generationInfo } } return message @@ -390,7 +406,8 @@ export const useMessage = () => { image, fullText, source, - message_source: "copilot" + message_source: "copilot", + generationInfo }) setIsProcessing(false) @@ -544,10 +561,25 @@ export const useMessage = () => { ) } + let generationInfo: any | undefined = undefined + const chunks = await ollama.stream( [...applicationChatHistory, humanMessage], { - signal: signal + signal: signal, + callbacks: [ + { + handleLLMEnd( + output: any, + ): any { + try { + generationInfo = output?.generations?.[0][0]?.generationInfo + } catch (e) { + console.log("handleLLMEnd error", e) + } + } + } + ] } ) let count = 0 @@ -576,7 +608,8 @@ export const useMessage = () => { if (message.id === generateMessageId) { return { ...message, - message: fullText + message: fullText, + generationInfo } } return message @@ -605,7 +638,8 @@ export const useMessage = () => { image, fullText, source: [], - message_source: "copilot" + message_source: "copilot", + generationInfo }) setIsProcessing(false) @@ -789,10 +823,24 @@ export const useMessage = () => { ) } + let generationInfo: any | undefined = undefined const chunks = await ollama.stream( [...applicationChatHistory, humanMessage], { - signal: signal + signal: signal, + callbacks: [ + { + handleLLMEnd( + output: any, + ): any { + try { + generationInfo = output?.generations?.[0][0]?.generationInfo + } catch (e) { + console.log("handleLLMEnd error", e) + } + } + } + ] } ) let count = 0 @@ -822,7 +870,8 @@ export const useMessage = () => { return { ...message, message: fullText, - sources: source + sources: source, + generationInfo } } return message @@ -850,7 +899,8 @@ export const useMessage = () => { message, image, fullText, - source + source, + generationInfo }) setIsProcessing(false) @@ -982,8 +1032,23 @@ export const useMessage = () => { }) } + let generationInfo: any | undefined = undefined + const chunks = await ollama.stream([humanMessage], { - signal: signal + signal: signal, + callbacks: [ + { + handleLLMEnd( + output: any, + ): any { + try { + generationInfo = output?.generations?.[0][0]?.generationInfo + } catch (e) { + console.log("handleLLMEnd error", e) + } + } + } + ] }) let count = 0 for await (const chunk of chunks) { @@ -1011,7 +1076,8 @@ export const useMessage = () => { if (message.id === generateMessageId) { return { ...message, - message: fullText + message: fullText, + generationInfo } } return message @@ -1042,7 +1108,8 @@ export const useMessage = () => { fullText, source: [], message_source: "copilot", - message_type: messageType + message_type: messageType, + generationInfo }) setIsProcessing(false) diff --git a/src/hooks/useMessageOption.tsx b/src/hooks/useMessageOption.tsx index 58d56bf..1cccb62 
100644 --- a/src/hooks/useMessageOption.tsx +++ b/src/hooks/useMessageOption.tsx @@ -243,10 +243,23 @@ export const useMessageOption = () => { ) } + let generationInfo: any | undefined = undefined + const chunks = await ollama.stream( [...applicationChatHistory, humanMessage], { - signal: signal + signal: signal, + callbacks: [ + { + handleLLMEnd(output: any): any { + try { + generationInfo = output?.generations?.[0][0]?.generationInfo + } catch (e) { + console.log("handleLLMEnd error", e) + } + } + } + ] } ) let count = 0 @@ -276,7 +289,8 @@ export const useMessageOption = () => { return { ...message, message: fullText, - sources: source + sources: source, + generationInfo } } return message @@ -304,7 +318,8 @@ export const useMessageOption = () => { message, image, fullText, - source + source, + generationInfo }) setIsProcessing(false) @@ -465,10 +480,23 @@ export const useMessageOption = () => { ) } + let generationInfo: any | undefined = undefined + const chunks = await ollama.stream( [...applicationChatHistory, humanMessage], { - signal: signal + signal: signal, + callbacks: [ + { + handleLLMEnd(output: any): any { + try { + generationInfo = output?.generations?.[0][0]?.generationInfo + } catch (e) { + console.log("handleLLMEnd error", e) + } + } + } + ], } ) @@ -498,7 +526,8 @@ export const useMessageOption = () => { if (message.id === generateMessageId) { return { ...message, - message: fullText + message: fullText, + generationInfo } } return message @@ -526,7 +555,8 @@ export const useMessageOption = () => { message, image, fullText, - source: [] + source: [], + generationInfo }) setIsProcessing(false) @@ -711,10 +741,23 @@ export const useMessageOption = () => { const applicationChatHistory = generateHistory(history, selectedModel) + let generationInfo: any | undefined = undefined + const chunks = await ollama.stream( [...applicationChatHistory, humanMessage], { - signal: signal + signal: signal, + callbacks: [ + { + handleLLMEnd(output: any): any { + try { + generationInfo = output?.generations?.[0][0]?.generationInfo + } catch (e) { + console.log("handleLLMEnd error", e) + } + } + } + ] } ) let count = 0 @@ -744,7 +787,8 @@ export const useMessageOption = () => { return { ...message, message: fullText, - sources: source + sources: source, + generationInfo } } return message @@ -772,7 +816,8 @@ export const useMessageOption = () => { message, image, fullText, - source + source, + generationInfo }) setIsProcessing(false) diff --git a/src/models/index.ts b/src/models/index.ts index d459e66..02f2ce8 100644 --- a/src/models/index.ts +++ b/src/models/index.ts @@ -49,7 +49,7 @@ export const pageAssistModel = async ({ configuration: { apiKey: providerInfo.apiKey || "temp", baseURL: providerInfo.baseUrl || "", - } + }, }) as any } diff --git a/src/types/message.ts b/src/types/message.ts index 3be7cdc..1ec1d5a 100644 --- a/src/types/message.ts +++ b/src/types/message.ts @@ -1,19 +1,20 @@ type WebSearch = { - search_engine: string - search_url: string - search_query: string - search_results: { - title: string - link: string - }[] - } - export type Message = { - isBot: boolean - name: string - message: string - sources: any[] - images?: string[] - search?: WebSearch - messageType?: string - id?: string - } \ No newline at end of file + search_engine: string + search_url: string + search_query: string + search_results: { + title: string + link: string + }[] +} +export type Message = { + isBot: boolean + name: string + message: string + sources: any[] + images?: string[] + search?: WebSearch 
+ messageType?: string + id?: string + generationInfo?: any +} \ No newline at end of file diff --git a/wxt.config.ts b/wxt.config.ts index c870e8e..288a555 100644 --- a/wxt.config.ts +++ b/wxt.config.ts @@ -50,7 +50,7 @@ export default defineConfig({ outDir: "build", manifest: { - version: "1.3.3", + version: "1.3.4", name: process.env.TARGET === "firefox" ? "Page Assist - A Web UI for Local AI Models" From 7c805cfe22661f7dd0673a75297d896b0a596086 Mon Sep 17 00:00:00 2001 From: n4ze3m Date: Sat, 9 Nov 2024 15:20:16 +0530 Subject: [PATCH 03/17] feat: Include generation info in message history Adds the `generationInfo` field to the message history output to provide more context about the message's origin. This will be helpful for debugging and understanding the provenance of messages. --- src/db/index.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/db/index.ts b/src/db/index.ts index 68b3b02..e4c136c 100644 --- a/src/db/index.ts +++ b/src/db/index.ts @@ -301,7 +301,8 @@ export const formatToMessage = (messages: MessageHistory): MessageType[] => { message: message.content, name: message.name, sources: message?.sources || [], - images: message.images || [] + images: message.images || [], + generationInfo: message?.generationInfo, } }) } From fd654cafdb8e31dd91d19b5c17e3141483e7f9ec Mon Sep 17 00:00:00 2001 From: n4ze3m Date: Sat, 9 Nov 2024 16:56:47 +0530 Subject: [PATCH 04/17] feat: Add max tokens setting for model generations Adds a new setting to control the maximum number of tokens generated by the model. This provides more control over the length of responses and can be useful for limiting the amount of text generated in certain situations. --- src/assets/locale/da/common.json | 4 ++++ src/assets/locale/de/common.json | 4 ++++ src/assets/locale/en/common.json | 4 ++++ src/assets/locale/es/common.json | 4 ++++ src/assets/locale/fa/common.json | 4 ++++ src/assets/locale/fr/common.json | 4 ++++ src/assets/locale/it/common.json | 4 ++++ src/assets/locale/ja-JP/common.json | 4 ++++ src/assets/locale/ko/common.json | 5 +++- src/assets/locale/ml/common.json | 4 ++++ src/assets/locale/no/common.json | 4 ++++ src/assets/locale/pt-BR/common.json | 4 ++++ src/assets/locale/ru/common.json | 4 ++++ src/assets/locale/sv/common.json | 4 ++++ src/assets/locale/zh/common.json | 4 ++++ .../Settings/CurrentChatModelSettings.tsx | 13 +++++++++- .../Option/Settings/model-settings.tsx | 9 ++++++- src/hooks/useMessage.tsx | 24 ++++++++++++++----- src/hooks/useMessageOption.tsx | 19 +++++++++++---- src/models/index.ts | 10 +++++--- 20 files changed, 119 insertions(+), 17 deletions(-) diff --git a/src/assets/locale/da/common.json b/src/assets/locale/da/common.json index 881dab7..de82642 100644 --- a/src/assets/locale/da/common.json +++ b/src/assets/locale/da/common.json @@ -70,6 +70,10 @@ "label": "Længden af Kontekst", "placeholder": "Instast Længden af Kontekst værdien (standard: 2048)" }, + "numPredict": { + "label": "Maks Tokens (num_predict)", + "placeholder": "Indtast Maks Tokens værdi (fx. 2048, 4096)" + }, "seed": { "label": "Seed", "placeholder": "Indtast Seed værdi (fx. 
1234)", diff --git a/src/assets/locale/de/common.json b/src/assets/locale/de/common.json index 9a69b22..65b72ab 100644 --- a/src/assets/locale/de/common.json +++ b/src/assets/locale/de/common.json @@ -70,6 +70,10 @@ "label": "Anzahl der Kontexte", "placeholder": "Geben Sie die Anzahl der Kontexte ein (Standard: 2048)" }, + "numPredict": { + "label": "Max Tokens (num_predict)", + "placeholder": "Geben Sie den Max-Tokens-Wert ein (z.B. 2048, 4096)" + }, "seed": { "label": "Seed", "placeholder": "Geben Sie den Seed-Wert ein (z.B. 1234)", diff --git a/src/assets/locale/en/common.json b/src/assets/locale/en/common.json index a7a3ecc..d033144 100644 --- a/src/assets/locale/en/common.json +++ b/src/assets/locale/en/common.json @@ -70,6 +70,10 @@ "label": "Number of Contexts", "placeholder": "Enter Number of Contexts value (default: 2048)" }, + "numPredict": { + "label": "Max Tokens (num_predict)", + "placeholder": "Enter Max Tokens value (e.g. 2048, 4096)" + }, "seed": { "label": "Seed", "placeholder": "Enter Seed value (e.g. 1234)", diff --git a/src/assets/locale/es/common.json b/src/assets/locale/es/common.json index cf5ccf8..210e043 100644 --- a/src/assets/locale/es/common.json +++ b/src/assets/locale/es/common.json @@ -70,6 +70,10 @@ "label": "Cantidad de contextos", "placeholder": "Ingresar el valor de tamaño de la ventana de contexto (por defecto: 2048)" }, + "numPredict": { + "label": "Máximo de Tokens (num_predict)", + "placeholder": "Ingrese el valor máximo de Tokens (ej: 2048, 4096)" + }, "seed": { "label": "Semilla", "placeholder": "Ingresar el valor de la semilla (ej: 1234)", diff --git a/src/assets/locale/fa/common.json b/src/assets/locale/fa/common.json index 6abb70a..72301c7 100644 --- a/src/assets/locale/fa/common.json +++ b/src/assets/locale/fa/common.json @@ -70,6 +70,10 @@ "label": "Number of Contexts", "placeholder": "مقدار Number of Contexts را وارد کنید (پیش فرض: 2048)" }, + "numPredict": { + "label": "حداکثر توکن‌ها (num_predict)", + "placeholder": "مقدار حداکثر توکن‌ها را وارد کنید (مثلا 2048، 4096)" + }, "seed": { "label": "Seed", "placeholder": "مقدار Seed را وارد کنید (e.g. 1234)", diff --git a/src/assets/locale/fr/common.json b/src/assets/locale/fr/common.json index 0775303..d11ef8c 100644 --- a/src/assets/locale/fr/common.json +++ b/src/assets/locale/fr/common.json @@ -70,6 +70,10 @@ "label": "Nombre de contextes", "placeholder": "Entrez la valeur du nombre de contextes (par défaut: 2048)" }, + "numPredict": { + "label": "Tokens maximum (num_predict)", + "placeholder": "Entrez la valeur des tokens maximum (par exemple 2048, 4096)" + }, "seed": { "label": "Graine", "placeholder": "Entrez la valeur des semences (par exemple 1234)", diff --git a/src/assets/locale/it/common.json b/src/assets/locale/it/common.json index 102b6c8..6b08509 100644 --- a/src/assets/locale/it/common.json +++ b/src/assets/locale/it/common.json @@ -70,6 +70,10 @@ "label": "Dimensione del Contesto", "placeholder": "Inserisci la Dimensione del Contesto (default: 2048)" }, + "numPredict": { + "label": "Token Massimi (num_predict)", + "placeholder": "Inserisci il valore dei Token Massimi (es. 2048, 4096)" + }, "seed": { "label": "Seed", "placeholder": "Inserisci il Valore Seed (e.g. 
1234)", diff --git a/src/assets/locale/ja-JP/common.json b/src/assets/locale/ja-JP/common.json index 6f38578..92ba8e0 100644 --- a/src/assets/locale/ja-JP/common.json +++ b/src/assets/locale/ja-JP/common.json @@ -70,6 +70,10 @@ "label": "コンテキストの数", "placeholder": "コンテキスト数を入力してください(デフォルト:2048)" }, + "numPredict": { + "label": "最大トークン数 (num_predict)", + "placeholder": "最大トークン数を入力してください(例:2048、4096)" + }, "seed": { "label": "シード", "placeholder": "シード値を入力してください(例:1234)", diff --git a/src/assets/locale/ko/common.json b/src/assets/locale/ko/common.json index e59e785..ed9e0b9 100644 --- a/src/assets/locale/ko/common.json +++ b/src/assets/locale/ko/common.json @@ -70,7 +70,10 @@ "label": "컨텍스트 수", "placeholder": "컨텍스트 수를 입력하세요 (기본값: 2048)" }, - "seed": { + "numPredict": { + "label": "최대 토큰 수 (num_predict)", + "placeholder": "최대 토큰 수를 입력하세요 (예: 2048, 4096)" + }, "seed": { "label": "시드", "placeholder": "시드 값을 입력하세요 (예: 1234)", "help": "모델 출력의 재현성" diff --git a/src/assets/locale/ml/common.json b/src/assets/locale/ml/common.json index ed6988a..cd6868d 100644 --- a/src/assets/locale/ml/common.json +++ b/src/assets/locale/ml/common.json @@ -69,6 +69,10 @@ "label": "സന്ദർഭങ്ങളുടെ എണ്ണം", "placeholder": "സന്ദർഭങ്ങളുടെ സംഖ്യ നൽകുക (സ്ഥിരം: 2048)" }, + "numPredict": { + "label": "പരമാവധി ടോക്കണുകൾ (num_predict)", + "placeholder": "പരമാവധി ടോക്കൺ മൂല്യം നൽകുക (ഉദാ: 2048, 4096)" + }, "seed": { "label": "സീഡ്", "placeholder": "സീഡ് വില്യമ നൽകുക (ഉദാ: 1234)", diff --git a/src/assets/locale/no/common.json b/src/assets/locale/no/common.json index b95ffce..1720d1f 100644 --- a/src/assets/locale/no/common.json +++ b/src/assets/locale/no/common.json @@ -70,6 +70,10 @@ "label": "Kontekstlengde", "placeholder": "Skriv inn kontekstlengdeverdi (standard: 2048)" }, + "numPredict": { + "label": "Maks Tokens (num_predict)", + "placeholder": "Skriv inn Maks Tokens-verdi (f.eks. 2048, 4096)" + }, "seed": { "label": "Seed", "placeholder": "Skriv inn seedverdi (f.eks. 1234)", diff --git a/src/assets/locale/pt-BR/common.json b/src/assets/locale/pt-BR/common.json index a2f1a41..660514d 100644 --- a/src/assets/locale/pt-BR/common.json +++ b/src/assets/locale/pt-BR/common.json @@ -70,6 +70,10 @@ "label": "Número de Contextos", "placeholder": "Digite o valor do Número de Contextos (padrão: 2048)" }, + "numPredict": { + "label": "Máximo de Tokens (num_predict)", + "placeholder": "Digite o valor do Máximo de Tokens (ex: 2048, 4096)" + }, "seed": { "label": "Semente", "placeholder": "Digite o valor da Semente (ex: 1234)", diff --git a/src/assets/locale/ru/common.json b/src/assets/locale/ru/common.json index 04b54de..31291a0 100644 --- a/src/assets/locale/ru/common.json +++ b/src/assets/locale/ru/common.json @@ -70,6 +70,10 @@ "label": "Количество контекстов", "placeholder": "Введите значение количества контекстов (по умолчанию: 2048)" }, + "numPredict": { + "label": "Максимальное количество токенов (num_predict)", + "placeholder": "Введите значение максимального количества токенов (например, 2048, 4096)" + }, "seed": { "label": "Сид", "placeholder": "Введите значение сида (например, 1234)", diff --git a/src/assets/locale/sv/common.json b/src/assets/locale/sv/common.json index b0df981..4698dc7 100644 --- a/src/assets/locale/sv/common.json +++ b/src/assets/locale/sv/common.json @@ -70,6 +70,10 @@ "label": "Antal kontexter", "placeholder": "Ange antal kontextvärden (standard: 2048)" }, + "numPredict": { + "label": "Max antal tokens (num_predict)", + "placeholder": "Ange Max antal tokens värde (t.ex. 
2048, 4096)" + }, "seed": { "label": "Frö", "placeholder": "Ange frövärde (t.ex. 1234)", diff --git a/src/assets/locale/zh/common.json b/src/assets/locale/zh/common.json index 7fb9f9d..080776a 100644 --- a/src/assets/locale/zh/common.json +++ b/src/assets/locale/zh/common.json @@ -70,6 +70,10 @@ "label": "上下文数量", "placeholder": "输入上下文数量(默认:2048)" }, + "numPredict": { + "label": "最大令牌数 (num_predict)", + "placeholder": "输入最大令牌数(例如:2048、4096)" + }, "seed": { "label": "随机种子", "placeholder": "输入随机种子值(例如:1234)", diff --git a/src/components/Common/Settings/CurrentChatModelSettings.tsx b/src/components/Common/Settings/CurrentChatModelSettings.tsx index 3e5c74f..cb37286 100644 --- a/src/components/Common/Settings/CurrentChatModelSettings.tsx +++ b/src/components/Common/Settings/CurrentChatModelSettings.tsx @@ -39,12 +39,14 @@ export const CurrentChatModelSettings = ({ numCtx: cUserSettings.numCtx ?? data.numCtx, seed: cUserSettings.seed, numGpu: cUserSettings.numGpu ?? data.numGpu, + numPredict: cUserSettings.numPredict ?? data.numPredict, systemPrompt: cUserSettings.systemPrompt ?? "" }) return data }, enabled: open, - refetchOnMount: true + refetchOnMount: false, + refetchOnWindowFocus: false }) const renderBody = () => { @@ -115,6 +117,15 @@ export const CurrentChatModelSettings = ({ /> + + + + { size="large" /> - + + + { currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx, seed: currentChatModelSettings?.seed, numGpu: - currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu + currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu, + numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict, + }) let newMessage: Message[] = [] @@ -261,7 +263,9 @@ export const useMessage = () => { userDefaultModelSettings?.numCtx, seed: currentChatModelSettings?.seed, numGpu: - currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu + currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu, + numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict, + }) const response = await questionOllama.invoke(promptForQuestion) query = response.content.toString() @@ -475,7 +479,9 @@ export const useMessage = () => { currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx, seed: currentChatModelSettings?.seed, numGpu: - currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu + currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu, + numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict, + }) let newMessage: Message[] = [] @@ -702,7 +708,9 @@ export const useMessage = () => { currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx, seed: currentChatModelSettings?.seed, numGpu: - currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu + currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu, + numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict, + }) let newMessage: Message[] = [] @@ -777,7 +785,9 @@ export const useMessage = () => { userDefaultModelSettings?.numCtx, seed: currentChatModelSettings?.seed, numGpu: - currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu + currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu, + numPredict: currentChatModelSettings?.numPredict ?? 
userDefaultModelSettings?.numPredict, + }) const response = await questionOllama.invoke(promptForQuestion) query = response.content.toString() @@ -964,7 +974,9 @@ export const useMessage = () => { currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx, seed: currentChatModelSettings?.seed, numGpu: - currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu + currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu, + numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict, + }) let newMessage: Message[] = [] diff --git a/src/hooks/useMessageOption.tsx b/src/hooks/useMessageOption.tsx index 1cccb62..959cb44 100644 --- a/src/hooks/useMessageOption.tsx +++ b/src/hooks/useMessageOption.tsx @@ -122,7 +122,9 @@ export const useMessageOption = () => { currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx, seed: currentChatModelSettings?.seed, numGpu: - currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu + currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu, + numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict, + }) let newMessage: Message[] = [] @@ -197,7 +199,9 @@ export const useMessageOption = () => { userDefaultModelSettings?.numCtx, seed: currentChatModelSettings?.seed, numGpu: - currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu + currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu, + numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict, + }) const response = await questionOllama.invoke(promptForQuestion) query = response.content.toString() @@ -381,7 +385,8 @@ export const useMessageOption = () => { currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx, seed: currentChatModelSettings?.seed, numGpu: - currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu + currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu, + numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict, }) let newMessage: Message[] = [] @@ -616,7 +621,9 @@ export const useMessageOption = () => { currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx, seed: currentChatModelSettings?.seed, numGpu: - currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu + currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu, + numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict, + }) let newMessage: Message[] = [] @@ -707,7 +714,9 @@ export const useMessageOption = () => { userDefaultModelSettings?.numCtx, seed: currentChatModelSettings?.seed, numGpu: - currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu + currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu, + numPredict: currentChatModelSettings?.numPredict ?? 
userDefaultModelSettings?.numPredict, + }) const response = await questionOllama.invoke(promptForQuestion) query = response.content.toString() diff --git a/src/models/index.ts b/src/models/index.ts index 02f2ce8..4798f78 100644 --- a/src/models/index.ts +++ b/src/models/index.ts @@ -13,7 +13,8 @@ export const pageAssistModel = async ({ topP, numCtx, seed, - numGpu + numGpu, + numPredict, }: { model: string baseUrl: string @@ -24,12 +25,13 @@ export const pageAssistModel = async ({ numCtx?: number seed?: number numGpu?: number + numPredict?: number }) => { if (model === "chrome::gemini-nano::page-assist") { return new ChatChromeAI({ temperature, - topK + topK, }) } @@ -46,6 +48,7 @@ export const pageAssistModel = async ({ openAIApiKey: providerInfo.apiKey || "temp", temperature, topP, + maxTokens: numPredict, configuration: { apiKey: providerInfo.apiKey || "temp", baseURL: providerInfo.baseUrl || "", @@ -64,7 +67,8 @@ export const pageAssistModel = async ({ numCtx, seed, model, - numGpu + numGpu, + numPredict }) From 977723f71f7cf8ea005f2a777b582b5909fad6a3 Mon Sep 17 00:00:00 2001 From: n4ze3m Date: Sat, 9 Nov 2024 17:13:23 +0530 Subject: [PATCH 05/17] feat: Ability to send image without text --- src/components/Option/Playground/PlaygroundForm.tsx | 8 +++++--- src/components/Sidepanel/Chat/form.tsx | 8 +++++--- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/components/Option/Playground/PlaygroundForm.tsx b/src/components/Option/Playground/PlaygroundForm.tsx index be312e2..4e0d637 100644 --- a/src/components/Option/Playground/PlaygroundForm.tsx +++ b/src/components/Option/Playground/PlaygroundForm.tsx @@ -159,7 +159,7 @@ export const PlaygroundForm = ({ dropedFile }: Props) => { e.preventDefault() stopListening() form.onSubmit(async (value) => { - if (value.message.trim().length === 0) { + if (value.message.trim().length === 0 && value.image.length === 0) { return } if (!selectedModel || selectedModel.length === 0) { @@ -228,6 +228,9 @@ export const PlaygroundForm = ({ dropedFile }: Props) => { return } } + if (value.message.trim().length === 0 && value.image.length === 0) { + return + } form.reset() textAreaFocus() await sendMessage({ @@ -261,7 +264,6 @@ export const PlaygroundForm = ({ dropedFile }: Props) => { onKeyDown={(e) => handleKeyDown(e)} ref={textareaRef} className="px-2 py-2 w-full resize-none bg-transparent focus-within:outline-none focus:ring-0 focus-visible:ring-0 ring-0 dark:ring-0 border-0 dark:text-gray-100" - required onPaste={handlePaste} rows={1} style={{ minHeight: "40px" }} @@ -409,4 +411,4 @@ export const PlaygroundForm = ({ dropedFile }: Props) => { ) -} +} \ No newline at end of file diff --git a/src/components/Sidepanel/Chat/form.tsx b/src/components/Sidepanel/Chat/form.tsx index e1cc3f0..618431d 100644 --- a/src/components/Sidepanel/Chat/form.tsx +++ b/src/components/Sidepanel/Chat/form.tsx @@ -75,10 +75,10 @@ export const SidepanelForm = ({ dropedFile }: Props) => { ) { e.preventDefault() form.onSubmit(async (value) => { - await stopListening() - if (value.message.trim().length === 0) { + if (value.message.trim().length === 0 && value.image.length === 0) { return } + await stopListening() if (!selectedModel || selectedModel.length === 0) { form.setFieldError("message", t("formError.noModel")) return @@ -237,6 +237,9 @@ export const SidepanelForm = ({ dropedFile }: Props) => { } } await stopListening() + if (value.message.trim().length === 0 && value.image.length === 0) { + return + } form.reset() textAreaFocus() await sendMessage({ @@ -260,7 
+263,6 @@ export const SidepanelForm = ({ dropedFile }: Props) => { onKeyDown={(e) => handleKeyDown(e)} ref={textareaRef} className="px-2 py-2 w-full resize-none bg-transparent focus-within:outline-none focus:ring-0 focus-visible:ring-0 ring-0 dark:ring-0 border-0 dark:text-gray-100" - required onPaste={handlePaste} rows={1} style={{ minHeight: "60px" }} From 88d0cb68ae6834c4bd4d68b6ed822359de3ea86d Mon Sep 17 00:00:00 2001 From: n4ze3m Date: Sat, 9 Nov 2024 17:58:23 +0530 Subject: [PATCH 06/17] feat: Support for new AI capabilities Adds support for the newer window.ai API surfaces in Chrome. This change includes updated logic for checking availability and creating text sessions across the successive API versions. --- src/models/utils/chrome.ts | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/src/models/utils/chrome.ts b/src/models/utils/chrome.ts index 16e0b8b..3d42686 100644 --- a/src/models/utils/chrome.ts +++ b/src/models/utils/chrome.ts @@ -3,13 +3,19 @@ export const checkChromeAIAvailability = async (): Promise<"readily" | "no" | "a try { const ai = (window as any).ai; - // upcoming version change + // newest API surface (window.ai.languageModel) + if (ai?.languageModel?.capabilities) { + const capabilities = await ai.languageModel.capabilities(); + return capabilities?.available ?? "no"; + } + + // older API surface (window.ai.assistant) if (ai?.assistant?.capabilities) { const capabilities = await ai.assistant.capabilities(); return capabilities?.available ?? "no"; } - // old version + // legacy API (canCreateTextSession) if (ai?.canCreateTextSession) { const available = await ai.canCreateTextSession(); return available ?? "no"; } @@ -33,7 +39,15 @@ export interface AITextSession { export const createAITextSession = async (data: any): Promise<AITextSession> => { const ai = (window as any).ai; - // upcoming version change + // newest API surface (window.ai.languageModel) + if (ai?.languageModel?.create) { + const session = await ai.languageModel.create({ + ...data + }) + return session + } + + // older API surface (window.ai.assistant) if (ai?.assistant?.create) { const session = await ai.assistant.create({ ...data }) return session } - // old version + // legacy API (createTextSession) if (ai.createTextSession) { const session = await ai.createTextSession({ ...data From 8fbdfc35d3a45c3b0d63d8f94b225bafc58add3f Mon Sep 17 00:00:00 2001 From: n4ze3m Date: Sat, 9 Nov 2024 18:09:47 +0530 Subject: [PATCH 07/17] feat(settings): Fall back to the selected system prompt in current chat model settings Adds support for using the currently selected system prompt as the temporary system prompt in the current chat model settings. This allows users to fine-tune their chat experience based on their preferred prompt.
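In essence, when the user has not typed a temporary system prompt, the settings form now seeds that field from the globally selected prompt. A distilled sketch of the fallback, with names taken from the diff below (`getPromptById` is the `@/db` helper the patch imports):

import { getPromptById } from "@/db"

// Resolve the system prompt to show in the current-chat settings form:
// an explicit per-chat override wins; otherwise fall back to the content
// of the globally selected prompt, if any.
const resolveSystemPrompt = async (
  override: string | undefined,
  selectedSystemPrompt: string | undefined
): Promise<string> => {
  if (override) return override
  if (selectedSystemPrompt) {
    const prompt = await getPromptById(selectedSystemPrompt)
    return prompt?.content ?? ""
  }
  return ""
}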
--- .../Common/Settings/CurrentChatModelSettings.tsx | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/src/components/Common/Settings/CurrentChatModelSettings.tsx b/src/components/Common/Settings/CurrentChatModelSettings.tsx index cb37286..e950bd9 100644 --- a/src/components/Common/Settings/CurrentChatModelSettings.tsx +++ b/src/components/Common/Settings/CurrentChatModelSettings.tsx @@ -1,3 +1,5 @@ +import { getPromptById } from "@/db" +import { useMessageOption } from "@/hooks/useMessageOption" import { getAllModelSettings } from "@/services/model-settings" import { useStoreChatModelSettings } from "@/store/model" import { useQuery } from "@tanstack/react-query" @@ -27,10 +29,20 @@ export const CurrentChatModelSettings = ({ const { t } = useTranslation("common") const [form] = Form.useForm() const cUserSettings = useStoreChatModelSettings() + const { selectedSystemPrompt } = useMessageOption() const { isPending: isLoading } = useQuery({ queryKey: ["fetchModelConfig2", open], queryFn: async () => { const data = await getAllModelSettings() + + let tempSystemPrompt = ""; + + // not ideal, but the selected prompt has to be resolved here so it can seed the form + if (selectedSystemPrompt) { + const prompt = await getPromptById(selectedSystemPrompt) + tempSystemPrompt = prompt?.content ?? "" + } + form.setFieldsValue({ temperature: cUserSettings.temperature ?? data.temperature, topK: cUserSettings.topK ?? data.topK, @@ -40,7 +52,7 @@ seed: cUserSettings.seed, numGpu: cUserSettings.numGpu ?? data.numGpu, numPredict: cUserSettings.numPredict ?? data.numPredict, - systemPrompt: cUserSettings.systemPrompt ?? "" + systemPrompt: cUserSettings.systemPrompt ?? tempSystemPrompt }) return data }, @@ -49,6 +61,7 @@ refetchOnWindowFocus: false }) + const renderBody = () => { return ( <> From f8791a0707814a0ca2ee3e7babcb1c4ab6435515 Mon Sep 17 00:00:00 2001 From: n4ze3m Date: Sat, 9 Nov 2024 19:10:34 +0530 Subject: [PATCH 08/17] feat: Introduce temporary chat mode Adds a new "Temporary Chat" mode for quick, non-persistent conversations. The new mode is available in the header bar and triggers a visually distinct chat experience with a temporary background color. Temporary chats are not saved to the chat history and are meant for short, one-off interactions. This gives users a convenient way to interact with the AI quickly without committing the conversation to their history.
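The mechanics of the mode are small: a boolean in the zustand store plus a guard in front of the persistence helpers. A condensed sketch of the pattern (the real wiring is spread across the diffs that follow; `saveSuccess` is the renamed chat-helper import shown in the useMessageOption diff):

import { create } from "zustand"
import { saveMessageOnSuccess as saveSuccess } from "./chat-helper"

type TempChatState = {
  temporaryChat: boolean
  setTemporaryChat: (temporaryChat: boolean) => void
}

export const useTempChatStore = create<TempChatState>((set) => ({
  temporaryChat: false,
  setTemporaryChat: (temporaryChat) => set({ temporaryChat })
}))

// Persistence guard: while a temporary chat is active, skip writing to history.
const saveMessageOnSuccess = async (e: any, temporaryChat: boolean) => {
  if (!temporaryChat) {
    return await saveSuccess(e) // normal path: persist the turn
  }
  return true // temporary chat: nothing is written
}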
--- src/assets/locale/da/option.json | 3 +- src/assets/locale/de/option.json | 3 +- src/assets/locale/en/option.json | 3 +- src/assets/locale/es/option.json | 3 +- src/assets/locale/fa/option.json | 3 +- src/assets/locale/fr/option.json | 3 +- src/assets/locale/it/option.json | 3 +- src/assets/locale/ja-JP/option.json | 3 +- src/assets/locale/ko/option.json | 3 +- src/assets/locale/ml/option.json | 3 +- src/assets/locale/no/option.json | 3 +- src/assets/locale/pt-BR/option.json | 3 +- src/assets/locale/ru/option.json | 3 +- src/assets/locale/sv/option.json | 3 +- src/assets/locale/zh/option.json | 3 +- src/components/Layouts/Header.tsx | 20 +++--- src/components/Layouts/NewChat.tsx | 57 +++++++++++++++++ .../Option/Playground/PlaygroundForm.tsx | 21 ++++--- src/components/Option/Sidebar.tsx | 7 ++- src/hooks/useMessageOption.tsx | 61 ++++++++++++++----- src/store/option.tsx | 8 ++- 21 files changed, 167 insertions(+), 52 deletions(-) create mode 100644 src/components/Layouts/NewChat.tsx diff --git a/src/assets/locale/da/option.json b/src/assets/locale/da/option.json index 4548680..5fc0281 100644 --- a/src/assets/locale/da/option.json +++ b/src/assets/locale/da/option.json @@ -8,5 +8,6 @@ "somethingWentWrong": "Noget gik galt", "validationSelectModel": "Venligst vælg en model for at forsæætte", "deleteHistoryConfirmation": "Er du sikker på at du vil slette denne historik?", - "editHistoryTitle": "Indtast en ny titel" + "editHistoryTitle": "Indtast en ny titel", + "temporaryChat": "Midlertidig Chat" } \ No newline at end of file diff --git a/src/assets/locale/de/option.json b/src/assets/locale/de/option.json index f6ec7f9..4303931 100644 --- a/src/assets/locale/de/option.json +++ b/src/assets/locale/de/option.json @@ -8,5 +8,6 @@ "somethingWentWrong": "Etwas ist schiefgelaufen", "validationSelectModel": "Bitte wähle ein Modell aus, um fortzufahren", "deleteHistoryConfirmation": "Bist du sicher, dass du diesen Verlauf löschen möchtest?", - "editHistoryTitle": "Gib einen neuen Titel ein" + "editHistoryTitle": "Gib einen neuen Titel ein", + "temporaryChat": "Temporärer Chat" } \ No newline at end of file diff --git a/src/assets/locale/en/option.json b/src/assets/locale/en/option.json index 5739b40..7d11938 100644 --- a/src/assets/locale/en/option.json +++ b/src/assets/locale/en/option.json @@ -8,5 +8,6 @@ "somethingWentWrong": "Something went wrong", "validationSelectModel": "Please select a model to continue", "deleteHistoryConfirmation": "Are you sure you want to delete this history?", - "editHistoryTitle": "Enter a new title" + "editHistoryTitle": "Enter a new title", + "temporaryChat": "Temporary Chat" } \ No newline at end of file diff --git a/src/assets/locale/es/option.json b/src/assets/locale/es/option.json index 3c48761..d9ab8cf 100644 --- a/src/assets/locale/es/option.json +++ b/src/assets/locale/es/option.json @@ -8,5 +8,6 @@ "somethingWentWrong": "Hubo un error", "validationSelectModel": "Selecione un modelo para continuar", "deleteHistoryConfirmation": "¿Esta seguro que quiere borrar éste histórico?", - "editHistoryTitle": "Ingrese un nuevo título" + "editHistoryTitle": "Ingrese un nuevo título", + "temporaryChat": "Chat Temporal" } diff --git a/src/assets/locale/fa/option.json b/src/assets/locale/fa/option.json index 9188710..5278cd0 100644 --- a/src/assets/locale/fa/option.json +++ b/src/assets/locale/fa/option.json @@ -8,5 +8,6 @@ "somethingWentWrong": "مشکلی پیش آمد", "validationSelectModel": "لطفا یک مدل را برای ادامه انتخاب کنید", "deleteHistoryConfirmation": "آیا مطمئن هستید 
که می خواهید این تاریخچه را حذف کنید؟", - "editHistoryTitle": "یک عنوان جدید وارد کنید" + "editHistoryTitle": "یک عنوان جدید وارد کنید", + "temporaryChat": "گپ موقت" } diff --git a/src/assets/locale/fr/option.json b/src/assets/locale/fr/option.json index ec40c0e..ad374aa 100644 --- a/src/assets/locale/fr/option.json +++ b/src/assets/locale/fr/option.json @@ -8,5 +8,6 @@ "somethingWentWrong": "Quelque chose s'est mal passé", "validationSelectModel": "Veuillez sélectionner un modèle pour continuer", "deleteHistoryConfirmation": "Êtes-vous sûr de vouloir supprimer cette historique ?", - "editHistoryTitle": "Entrez un nouveau titre" + "editHistoryTitle": "Entrez un nouveau titre", + "temporaryChat": "Chat temporaire" } \ No newline at end of file diff --git a/src/assets/locale/it/option.json b/src/assets/locale/it/option.json index 6fcd098..fb222c3 100644 --- a/src/assets/locale/it/option.json +++ b/src/assets/locale/it/option.json @@ -8,5 +8,6 @@ "somethingWentWrong": "Qualcosa è andato storto", "validationSelectModel": "Scegliere un modello per continuare", "deleteHistoryConfirmation": "Sei sicuro che vuoi eliminare la cronologia?", - "editHistoryTitle": "Inserisci un nuovo titolo" + "editHistoryTitle": "Inserisci un nuovo titolo", + "temporaryChat": "Chat Temporanea" } \ No newline at end of file diff --git a/src/assets/locale/ja-JP/option.json b/src/assets/locale/ja-JP/option.json index 57b023b..76dc343 100644 --- a/src/assets/locale/ja-JP/option.json +++ b/src/assets/locale/ja-JP/option.json @@ -8,5 +8,6 @@ "somethingWentWrong": "何かが間違っています", "validationSelectModel": "続行するにはモデルを選択してください", "deleteHistoryConfirmation": "この履歴を削除しますか?", - "editHistoryTitle": "新しいタイトルを入力" + "editHistoryTitle": "新しいタイトルを入力", + "temporaryChat": "一時的なチャット" } \ No newline at end of file diff --git a/src/assets/locale/ko/option.json b/src/assets/locale/ko/option.json index 21a5048..2db30cc 100644 --- a/src/assets/locale/ko/option.json +++ b/src/assets/locale/ko/option.json @@ -8,5 +8,6 @@ "somethingWentWrong": "문제가 발생했습니다", "validationSelectModel": "계속하려면 모델을 선택하세요", "deleteHistoryConfirmation": "이 기록을 삭제하시겠습니까?", - "editHistoryTitle": "새 제목 입력" + "editHistoryTitle": "새 제목 입력", + "temporaryChat": "임시 채팅" } diff --git a/src/assets/locale/ml/option.json b/src/assets/locale/ml/option.json index f8fcf70..1004b13 100644 --- a/src/assets/locale/ml/option.json +++ b/src/assets/locale/ml/option.json @@ -8,5 +8,6 @@ "somethingWentWrong": "എന്തോ തെറ്റായി", "deleteHistoryConfirmation": "നിങ്ങളുടെ ചാറ്റ് ചരിത്രം ഇല്ലാതാക്കണമെന്ന് തീർച്ചയാണോ?", "editHistoryTitle": "ചാറ്റ് title എഡിറ്റുചെയ്യുക", - "validationSelectModel": "തുടരുന്നതിന് ഒരു മോഡല്‍ തിരഞ്ഞെടുക്കുക" + "validationSelectModel": "തുടരുന്നതിന് ഒരു മോഡല്‍ തിരഞ്ഞെടുക്കുക", + "temporaryChat": "താൽക്കാലിക ചാറ്റ്" } \ No newline at end of file diff --git a/src/assets/locale/no/option.json b/src/assets/locale/no/option.json index 76d335e..64adb7b 100644 --- a/src/assets/locale/no/option.json +++ b/src/assets/locale/no/option.json @@ -8,5 +8,6 @@ "somethingWentWrong": "Noe gikk galt", "validationSelectModel": "Vennligst velg en modell for å fortsette", "deleteHistoryConfirmation": "Er du sikker på at du vil slette denne historikken?", - "editHistoryTitle": "Skriv inn en ny tittel" + "editHistoryTitle": "Skriv inn en ny tittel", + "temporaryChat": "Midlertidig Chat" } diff --git a/src/assets/locale/pt-BR/option.json b/src/assets/locale/pt-BR/option.json index de29ff1..eeffe1c 100644 --- a/src/assets/locale/pt-BR/option.json +++ b/src/assets/locale/pt-BR/option.json @@ -8,5 +8,6 
@@ "somethingWentWrong": "Algo deu errado", "validationSelectModel": "Por favor, selecione um modelo para continuar", "deleteHistoryConfirmation": "Tem certeza de que deseja excluir este histórico?", - "editHistoryTitle": "Digite um novo título" + "editHistoryTitle": "Digite um novo título", + "temporaryChat": "Chat Temporário" } \ No newline at end of file diff --git a/src/assets/locale/ru/option.json b/src/assets/locale/ru/option.json index f15c106..bd8f987 100644 --- a/src/assets/locale/ru/option.json +++ b/src/assets/locale/ru/option.json @@ -8,5 +8,6 @@ "somethingWentWrong": "Что-то пошло не так", "validationSelectModel": "Пожалуйста, выберите модель, чтобы продолжить", "deleteHistoryConfirmation": "Вы уверены, что хотите удалить эту историю?", - "editHistoryTitle": "Введите новое название" + "editHistoryTitle": "Введите новое название", + "temporaryChat": "Временный чат" } diff --git a/src/assets/locale/sv/option.json b/src/assets/locale/sv/option.json index d7017ee..98e1012 100644 --- a/src/assets/locale/sv/option.json +++ b/src/assets/locale/sv/option.json @@ -8,5 +8,6 @@ "somethingWentWrong": "Något gick fel", "validationSelectModel": "Vänligen välj en modell för att fortsätta", "deleteHistoryConfirmation": "Är du säker på att du vill radera denna historik?", - "editHistoryTitle": "Ange en ny titel" + "editHistoryTitle": "Ange en ny titel", + "temporaryChat": "Tillfällig chatt" } diff --git a/src/assets/locale/zh/option.json b/src/assets/locale/zh/option.json index 095daca..cba6731 100644 --- a/src/assets/locale/zh/option.json +++ b/src/assets/locale/zh/option.json @@ -8,5 +8,6 @@ "somethingWentWrong": "出现了错误", "validationSelectModel": "请选择一个模型以继续", "deleteHistoryConfirmation": "你确定要删除这个历史记录吗?", - "editHistoryTitle": "输入一个新的标题" + "editHistoryTitle": "输入一个新的标题", + "temporaryChat": "临时聊天" } \ No newline at end of file diff --git a/src/components/Layouts/Header.tsx b/src/components/Layouts/Header.tsx index 3d3ba89..7f3e153 100644 --- a/src/components/Layouts/Header.tsx +++ b/src/components/Layouts/Header.tsx @@ -21,6 +21,7 @@ import { Select, Tooltip } from "antd" import { getAllPrompts } from "@/db" import { ShareBtn } from "~/components/Common/ShareBtn" import { ProviderIcons } from "../Common/ProviderIcon" +import { NewChat } from "./NewChat" type Props = { setSidebarOpen: (open: boolean) => void setOpenModelSettings: (open: boolean) => void @@ -45,12 +46,12 @@ export const Header: React.FC = ({ setSelectedSystemPrompt, messages, streaming, - historyId + historyId, + temporaryChat } = useMessageOption() const { data: models, isLoading: isModelsLoading, - isFetching: isModelsFetching } = useQuery({ queryKey: ["fetchModel"], queryFn: () => fetchChatModels({ returnEmpty: true }), @@ -86,7 +87,9 @@ export const Header: React.FC = ({ } return ( -
+
{pathname !== "/" && (
@@ -104,14 +107,9 @@ export const Header: React.FC = ({
-
- -
+ {"/"} diff --git a/src/components/Layouts/NewChat.tsx b/src/components/Layouts/NewChat.tsx new file mode 100644 index 0000000..e20681a --- /dev/null +++ b/src/components/Layouts/NewChat.tsx @@ -0,0 +1,57 @@ +import { SquarePen, MoreHorizontal, TimerReset } from "lucide-react" +import { useTranslation } from "react-i18next" +import { Dropdown, Switch } from "antd" +import type { MenuProps } from "antd" +import { useMessageOption } from "@/hooks/useMessageOption" + +type Props = { + clearChat: () => void +} + +export const NewChat: React.FC = ({ clearChat }) => { + const { t } = useTranslation(["option", "common"]) + + const { temporaryChat, setTemporaryChat, messages } = useMessageOption() + + const items: MenuProps["items"] = [ + { + key: "1", + label: ( + + ) + } + ] + return ( +
+ + + + +
+ ) +} diff --git a/src/components/Option/Playground/PlaygroundForm.tsx b/src/components/Option/Playground/PlaygroundForm.tsx index 4e0d637..5f19861 100644 --- a/src/components/Option/Playground/PlaygroundForm.tsx +++ b/src/components/Option/Playground/PlaygroundForm.tsx @@ -36,7 +36,8 @@ export const PlaygroundForm = ({ dropedFile }: Props) => { selectedQuickPrompt, textareaRef, setSelectedQuickPrompt, - selectedKnowledge + selectedKnowledge, + temporaryChat } = useMessageOption() const isMobile = () => { @@ -190,7 +191,10 @@ export const PlaygroundForm = ({ dropedFile }: Props) => { } return ( -
+
{
-
+
{ stopListening() @@ -228,7 +234,10 @@ export const PlaygroundForm = ({ dropedFile }: Props) => { return } } - if (value.message.trim().length === 0 && value.image.length === 0) { + if ( + value.message.trim().length === 0 && + value.image.length === 0 + ) { return } form.reset() @@ -288,8 +297,6 @@ export const PlaygroundForm = ({ dropedFile }: Props) => { )}
- - {!selectedKnowledge && (
) -} \ No newline at end of file +} diff --git a/src/components/Option/Sidebar.tsx b/src/components/Option/Sidebar.tsx index 1c9fd54..6e6881d 100644 --- a/src/components/Option/Sidebar.tsx +++ b/src/components/Option/Sidebar.tsx @@ -34,7 +34,8 @@ export const Sidebar = ({ onClose }: Props) => { setHistoryId, historyId, clearChat, - setSelectedModel + setSelectedModel, + temporaryChat } = useMessageOption() const { t } = useTranslation(["option", "common"]) const client = useQueryClient() @@ -126,7 +127,7 @@ export const Sidebar = ({ onClose }: Props) => { }) return ( -
+
{status === "success" && chatHistories.length === 0 && (
@@ -244,4 +245,4 @@ export const Sidebar = ({ onClose }: Props) => { )}
) -} +} \ No newline at end of file diff --git a/src/hooks/useMessageOption.tsx b/src/hooks/useMessageOption.tsx index 959cb44..b35e2e1 100644 --- a/src/hooks/useMessageOption.tsx +++ b/src/hooks/useMessageOption.tsx @@ -22,7 +22,10 @@ import { notification } from "antd" import { getSystemPromptForWeb } from "~/web/web" import { generateHistory } from "@/utils/generate-history" import { useTranslation } from "react-i18next" -import { saveMessageOnError, saveMessageOnSuccess } from "./chat-helper" +import { + saveMessageOnError as saveError, + saveMessageOnSuccess as saveSuccess +} from "./chat-helper" import { usePageAssist } from "@/context" import { PageAssistVectorStore } from "@/libs/PageAssistVectorStore" import { formatDocs } from "@/chain/chat-with-x" @@ -65,7 +68,9 @@ export const useMessageOption = () => { selectedSystemPrompt, setSelectedSystemPrompt, selectedKnowledge, - setSelectedKnowledge + setSelectedKnowledge, + temporaryChat, + setTemporaryChat } = useStoreMessageOption() const currentChatModelSettings = useStoreChatModelSettings() const [selectedModel, setSelectedModel] = useStorage("selectedModel") @@ -123,8 +128,9 @@ export const useMessageOption = () => { seed: currentChatModelSettings?.seed, numGpu: currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu, - numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict, - + numPredict: + currentChatModelSettings?.numPredict ?? + userDefaultModelSettings?.numPredict }) let newMessage: Message[] = [] @@ -199,9 +205,11 @@ export const useMessageOption = () => { userDefaultModelSettings?.numCtx, seed: currentChatModelSettings?.seed, numGpu: - currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu, - numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict, - + currentChatModelSettings?.numGpu ?? + userDefaultModelSettings?.numGpu, + numPredict: + currentChatModelSettings?.numPredict ?? + userDefaultModelSettings?.numPredict }) const response = await questionOllama.invoke(promptForQuestion) query = response.content.toString() @@ -355,6 +363,22 @@ export const useMessageOption = () => { } } + const saveMessageOnSuccess = async (e: any) => { + if (!temporaryChat) { + return await saveSuccess(e) + } + + return true + } + + const saveMessageOnError = async (e: any) => { + if (!temporaryChat) { + return await saveError(e) + } + + return true + } + const normalChatMode = async ( message: string, image: string, @@ -386,7 +410,9 @@ export const useMessageOption = () => { seed: currentChatModelSettings?.seed, numGpu: currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu, - numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict, + numPredict: + currentChatModelSettings?.numPredict ?? + userDefaultModelSettings?.numPredict }) let newMessage: Message[] = [] @@ -501,7 +527,7 @@ export const useMessageOption = () => { } } } - ], + ] } ) @@ -622,8 +648,9 @@ export const useMessageOption = () => { seed: currentChatModelSettings?.seed, numGpu: currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu, - numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict, - + numPredict: + currentChatModelSettings?.numPredict ?? + userDefaultModelSettings?.numPredict }) let newMessage: Message[] = [] @@ -714,9 +741,11 @@ export const useMessageOption = () => { userDefaultModelSettings?.numCtx, seed: currentChatModelSettings?.seed, numGpu: - currentChatModelSettings?.numGpu ?? 
userDefaultModelSettings?.numGpu, - numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict, - + currentChatModelSettings?.numGpu ?? + userDefaultModelSettings?.numGpu, + numPredict: + currentChatModelSettings?.numPredict ?? + userDefaultModelSettings?.numPredict }) const response = await questionOllama.invoke(promptForQuestion) query = response.content.toString() @@ -1038,6 +1067,8 @@ export const useMessageOption = () => { textareaRef, selectedKnowledge, setSelectedKnowledge, - ttsEnabled + ttsEnabled, + temporaryChat, + setTemporaryChat, } } diff --git a/src/store/option.tsx b/src/store/option.tsx index 8a55501..49911cd 100644 --- a/src/store/option.tsx +++ b/src/store/option.tsx @@ -65,6 +65,9 @@ type State = { setSpeechToTextLanguage: (language: string) => void speechToTextLanguage: string + + temporaryChat: boolean + setTemporaryChat: (temporaryChat: boolean) => void } export const useStoreMessageOption = create((set) => ({ @@ -102,5 +105,8 @@ export const useStoreMessageOption = create((set) => ({ setSelectedQuickPrompt: (selectedQuickPrompt) => set({ selectedQuickPrompt }), selectedKnowledge: null, - setSelectedKnowledge: (selectedKnowledge) => set({ selectedKnowledge }) + setSelectedKnowledge: (selectedKnowledge) => set({ selectedKnowledge }), + + temporaryChat: false, + setTemporaryChat: (temporaryChat) => set({ temporaryChat }), })) From 9c7a3f5ddc10079ded309d75850b74489e495bc2 Mon Sep 17 00:00:00 2001 From: n4ze3m Date: Sun, 10 Nov 2024 12:37:48 +0530 Subject: [PATCH 09/17] fix: Prevent errors from optional fields in chat history and chunk content The code was relying on optional fields like `content` in chat history and chunk objects, leading to potential errors if these fields were missing. This commit ensures proper handling of these fields by adding optional chaining (`?.`) for safer access. This prevents crashes and ensures the application handles the missing fields gracefully. 
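The whole patch is variations on one change: optional chaining on values that may be absent mid-stream. A minimal illustration (types assumed for the sketch):

type StreamChunk = { content?: string }

let fullText = ""
const append = (chunk: StreamChunk | undefined) => {
  // `chunk.content` throws if `chunk` is null/undefined; `chunk?.content`
  // short-circuits to undefined instead, so the streaming loop survives.
  fullText += chunk?.content ?? ""
}

Note that `?.` alone still concatenates the string "undefined" when the value is missing, so pairing it with `?? ""` as in the sketch is the safer form; the patch itself relies on `?.` only.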
--- src/chain/chat-with-website.ts | 7 ++++--- src/chain/chat-with-x.ts | 1 + src/hooks/useMessage.tsx | 16 ++++++++-------- src/hooks/useMessageOption.tsx | 12 ++++++------ 4 files changed, 19 insertions(+), 17 deletions(-) diff --git a/src/chain/chat-with-website.ts b/src/chain/chat-with-website.ts index 3b909d4..1c6b5d3 100644 --- a/src/chain/chat-with-website.ts +++ b/src/chain/chat-with-website.ts @@ -1,3 +1,4 @@ +//@ts-nocheck import { BaseLanguageModel } from "langchain/base_language"; import { Document } from "@langchain/core/documents"; import { @@ -28,8 +29,8 @@ export function groupMessagesByConversation(messages: ChatHistory) { const groupedMessages = []; for (let i = 0; i < messages.length; i += 2) { groupedMessages.push({ - human: messages[i].content, - ai: messages[i + 1].content, + human: messages[i]?.content, + ai: messages[i + 1]?.content, }); } @@ -38,7 +39,7 @@ export function groupMessagesByConversation(messages: ChatHistory) { const formatChatHistoryAsString = (history: BaseMessage[]) => { return history - .map((message) => `${message._getType()}: ${message.content}`) + .map((message) => `${message._getType()}: ${message?.content}`) .join("\n"); }; diff --git a/src/chain/chat-with-x.ts b/src/chain/chat-with-x.ts index 4a63829..296277f 100644 --- a/src/chain/chat-with-x.ts +++ b/src/chain/chat-with-x.ts @@ -1,3 +1,4 @@ +//@ts-nocheck import { BaseLanguageModel } from "@langchain/core/language_models/base" import { Document } from "@langchain/core/documents" import { diff --git a/src/hooks/useMessage.tsx b/src/hooks/useMessage.tsx index cc38949..d06102d 100644 --- a/src/hooks/useMessage.tsx +++ b/src/hooks/useMessage.tsx @@ -355,8 +355,8 @@ export const useMessage = () => { ) let count = 0 for await (const chunk of chunks) { - contentToSave += chunk.content - fullText += chunk.content + contentToSave += chunk?.content + fullText += chunk?.content if (count === 0) { setIsProcessing(true) } @@ -590,8 +590,8 @@ export const useMessage = () => { ) let count = 0 for await (const chunk of chunks) { - contentToSave += chunk.content - fullText += chunk.content + contentToSave += chunk?.content + fullText += chunk?.content if (count === 0) { setIsProcessing(true) } @@ -855,8 +855,8 @@ export const useMessage = () => { ) let count = 0 for await (const chunk of chunks) { - contentToSave += chunk.content - fullText += chunk.content + contentToSave += chunk?.content + fullText += chunk?.content if (count === 0) { setIsProcessing(true) } @@ -1064,8 +1064,8 @@ export const useMessage = () => { }) let count = 0 for await (const chunk of chunks) { - contentToSave += chunk.content - fullText += chunk.content + contentToSave += chunk?.content + fullText += chunk?.content if (count === 0) { setIsProcessing(true) } diff --git a/src/hooks/useMessageOption.tsx b/src/hooks/useMessageOption.tsx index b35e2e1..90fc06a 100644 --- a/src/hooks/useMessageOption.tsx +++ b/src/hooks/useMessageOption.tsx @@ -276,8 +276,8 @@ export const useMessageOption = () => { ) let count = 0 for await (const chunk of chunks) { - contentToSave += chunk.content - fullText += chunk.content + contentToSave += chunk?.content + fullText += chunk?.content if (count === 0) { setIsProcessing(true) } @@ -533,8 +533,8 @@ export const useMessageOption = () => { let count = 0 for await (const chunk of chunks) { - contentToSave += chunk.content - fullText += chunk.content + contentToSave += chunk?.content + fullText += chunk?.content if (count === 0) { setIsProcessing(true) } @@ -800,8 +800,8 @@ export const useMessageOption = 
() => { ) let count = 0 for await (const chunk of chunks) { - contentToSave += chunk.content - fullText += chunk.content + contentToSave += chunk?.content + fullText += chunk?.content if (count === 0) { setIsProcessing(true) } From f52e3d564acdb0cb83a9fe0de43876fa7752e518 Mon Sep 17 00:00:00 2001 From: n4ze3m Date: Sun, 10 Nov 2024 13:29:41 +0530 Subject: [PATCH 10/17] Fix: Handle image URLs in messages sent to custom models Improves the formatting of image URLs in human messages sent to custom (OpenAI-compatible) models, and unwraps single-part content arrays into plain text. This ensures multimodal payloads reach the provider in the shape it expects. --- src/utils/human-message.tsx | 69 +++++++++++++++++++------------------ 1 file changed, 36 insertions(+), 33 deletions(-) diff --git a/src/utils/human-message.tsx b/src/utils/human-message.tsx index 6712339..9c1200f 100644 --- a/src/utils/human-message.tsx +++ b/src/utils/human-message.tsx @@ -1,43 +1,46 @@ import { isCustomModel } from "@/db/models" import { HumanMessage, type MessageContent } from "@langchain/core/messages" - type HumanMessageType = { - content: MessageContent, - model: string + content: MessageContent + model: string } export const humanMessageFormatter = ({ content, model }: HumanMessageType) => { - - const isCustom = isCustomModel(model) - - if(isCustom) { - if(typeof content !== 'string') { - if(content.length > 1) { - // this means that we need to reformat the image_url - const newContent: MessageContent = [ - { - type: "text", - //@ts-ignore - text: content[0].text - }, - { - type: "image_url", - image_url: { - //@ts-ignore - url: content[1].image_url - } - } - ] + const isCustom = isCustomModel(model) 

- return new HumanMessage({ - content: newContent - }) + if (isCustom) { + if (typeof content !== "string") { + if (content.length > 1) { + // this means that we need to reformat the image_url + const newContent: MessageContent = [ + { + type: "text", + //@ts-ignore + text: content[0].text + }, + { + type: "image_url", + image_url: { + //@ts-ignore + url: content[1].image_url } - } + } + ] + + return new HumanMessage({ + content: newContent + }) + } else { + return new HumanMessage({ + //@ts-ignore + content: content[0].text + }) + } } - - return new HumanMessage({ - content, - }) -} \ No newline at end of file + } + + return new HumanMessage({ + content + }) +} From c6a62126dd2c14917dc4a25de90617e747f7ebbe Mon Sep 17 00:00:00 2001 From: n4ze3m Date: Sun, 10 Nov 2024 14:02:44 +0530 Subject: [PATCH 11/17] feat: Add LlamaFile support Add support for LlamaFile, a new model provider that allows users to interact with models packaged in the llamafile format. This includes: - Adding an icon for LlamaFile in the provider selection menu. - Updating the model provider selection to include LlamaFile. - Updating the model handling logic to properly identify and process LlamaFile models. - Updating the API providers list to include LlamaFile. This enables users to leverage the capabilities of LlamaFile models within the application.
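As with LM Studio, Llamafile models are distinguished by a provider suffix baked into the model id, which the regex helpers in the diff below detect and strip. A sketch of the round trip (regex copied from the diff; the example id is hypothetical):

const LLAMAFILE_SUFFIX = /_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/

// tag: "<modelId>_llamafile_<providerId>", e.g. "mistral_llamafile_openai-ab12-c34-d5e6"
const tagModel = (modelId: string, providerId: string) =>
  `${modelId}_llamafile_${providerId}`

const isLlamafileModel = (model: string) => LLAMAFILE_SUFFIX.test(model)
const displayName = (model: string) => model.replace(LLAMAFILE_SUFFIX, "")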
--- src/components/Common/ProviderIcon.tsx | 3 + src/components/Icons/Llamafile.tsx | 24 +++++++ src/components/Option/Settings/openai.tsx | 3 +- src/db/models.ts | 84 ++++++++++++++++++++++- src/utils/oai-api-providers.ts | 5 ++ 5 files changed, 117 insertions(+), 2 deletions(-) create mode 100644 src/components/Icons/Llamafile.tsx diff --git a/src/components/Common/ProviderIcon.tsx b/src/components/Common/ProviderIcon.tsx index 38ba504..8142adc 100644 --- a/src/components/Common/ProviderIcon.tsx +++ b/src/components/Common/ProviderIcon.tsx @@ -6,6 +6,7 @@ import { LMStudioIcon } from "../Icons/LMStudio" import { OpenAiIcon } from "../Icons/OpenAI" import { TogtherMonoIcon } from "../Icons/Togther" import { OpenRouterIcon } from "../Icons/OpenRouter" +import { LLamaFile } from "../Icons/Llamafile" export const ProviderIcons = ({ provider, @@ -31,6 +32,8 @@ return case "openrouter": return + case "llamafile": + return default: return } diff --git a/src/components/Icons/Llamafile.tsx b/src/components/Icons/Llamafile.tsx new file mode 100644 index 0000000..734cdd9 --- /dev/null +++ b/src/components/Icons/Llamafile.tsx @@ -0,0 +1,24 @@ +// logo copied from the Hugging Face website
+import React from "react" + +export const LLamaFile = React.forwardRef< + SVGSVGElement, + React.SVGProps<SVGSVGElement> +>((props, ref) => { + return ( + + + + ) +}) diff --git a/src/components/Option/Settings/openai.tsx b/src/components/Option/Settings/openai.tsx index 273b1ce..fe4dcdb 100644 --- a/src/components/Option/Settings/openai.tsx +++ b/src/components/Option/Settings/openai.tsx @@ -47,7 +47,8 @@ export const OpenAIApp = () => { }) setOpen(false) message.success(t("addSuccess")) - if (provider !== "lmstudio") { + const noPopupProvider = ["lmstudio", "llamafile"] + if (!noPopupProvider.includes(provider)) { setOpenaiId(data) setOpenModelModal(true) } diff --git a/src/db/models.ts b/src/db/models.ts index 4575c67..eaa7650 100644 --- a/src/db/models.ts +++ b/src/db/models.ts @@ -24,6 +24,7 @@ export const removeModelSuffix = (id: string) => { return id .replace(/_model-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{3,4}-[a-f0-9]{4}/, "") .replace(/_lmstudio_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/, "") + .replace(/_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/, "") } export const isLMStudioModel = (model: string) => { const lmstudioModelRegex = @@ -31,6 +32,12 @@ export const isLMStudioModel = (model: string) => { return lmstudioModelRegex.test(model) } +export const isLlamafileModel = (model: string) => { + const llamafileModelRegex = + /_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/ + return llamafileModelRegex.test(model) +} + export const getLMStudioModelId = ( model: string ): { model_id: string; provider_id: string } => { @@ -44,10 +51,29 @@ export const getLMStudioModelId = ( } return null } + +export const getLlamafileModelId = ( + model: string +): { model_id: string; provider_id: string } => { + const llamafileModelRegex = + /_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/ + const match = model.match(llamafileModelRegex) + if (match) { + const modelId = match[0] + const providerId = match[0].replace("_llamafile_openai-", "") + return { model_id: modelId, provider_id: providerId } + } + return null +} export const isCustomModel = (model: string) => { if (isLMStudioModel(model)) { return true } + + if (isLlamafileModel(model)) { + return true + } + const customModelRegex = /_model-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{3,4}-[a-f0-9]{4}/ return customModelRegex.test(model) @@ -201,6
+227,25 @@ export const getModelInfo = async (id: string) => { } } + + if (isLlamafileModel(id)) { + const llamafileId = getLlamafileModelId(id) + if (!llamafileId) { + throw new Error("Invalid Llamafile model ID") + } + return { + model_id: id.replace( + /_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/, + "" + ), + provider_id: `openai-${llamafileId.provider_id}`, + name: id.replace( + /_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/, + "" + ) + } + } + const model = await db.getById(id) return model } @@ -264,6 +309,27 @@ export const dynamicFetchLMStudio = async ({ return lmstudioModels } +export const dynamicFetchLlamafile = async ({ + baseUrl, + providerId +}: { + baseUrl: string + providerId: string +}) => { + const models = await getAllOpenAIModels(baseUrl) + const llamafileModels = models.map((e) => { + return { + name: e?.name || e?.id, + id: `${e?.id}_llamafile_${providerId}`, + provider: providerId, + lookup: `${e?.id}_${providerId}`, + provider_id: providerId + } + }) + + return llamafileModels +} + export const ollamaFormatAllCustomModels = async ( modelType: "all" | "chat" | "embedding" = "all" ) => { @@ -276,6 +342,10 @@ (provider) => provider.provider === "lmstudio" ) + const llamafileProviders = allProviders.filter( + (provider) => provider.provider === "llamafile" + ) + const lmModelsPromises = lmstudioProviders.map((provider) => dynamicFetchLMStudio({ baseUrl: provider.baseUrl, @@ -283,16 +353,28 @@ }) ) + const llamafileModelsPromises = llamafileProviders.map((provider) => + dynamicFetchLlamafile({ + baseUrl: provider.baseUrl, + providerId: provider.id + }) + ) + const lmModelsFetch = await Promise.all(lmModelsPromises) + const llamafileModelsFetch = await Promise.all(llamafileModelsPromises) + const lmModels = lmModelsFetch.flat() + const llamafileModels = llamafileModelsFetch.flat() + // merge allModels and lmModels const allModlesWithLMStudio = [ ...(modelType !== "all" ? allModles.filter((model) => model.model_type === modelType) : allModles), - ...lmModels + ...lmModels, + ...llamafileModels ] const ollamaModels = allModlesWithLMStudio.map((model) => { diff --git a/src/utils/oai-api-providers.ts b/src/utils/oai-api-providers.ts index 5d65105..52bfcf9 100644 --- a/src/utils/oai-api-providers.ts +++ b/src/utils/oai-api-providers.ts @@ -4,6 +4,11 @@ export const OAI_API_PROVIDERS = [ value: "lmstudio", baseUrl: "http://localhost:1234/v1" }, + { + label: "LlamaFile", + value: "llamafile", + baseUrl: "http://127.0.0.1:8080/v1" + }, { label: "OpenAI", value: "openai", From a7f461da0b1d4af9025283f5dd82aa927fc3cc82 Mon Sep 17 00:00:00 2001 From: n4ze3m Date: Sun, 10 Nov 2024 15:31:28 +0530 Subject: [PATCH 12/17] feat: Add multiple Ollama support Adds support for connecting additional Ollama instances through Ollama's OpenAI-compatible API, registered under the provider key "ollama2". This includes: - Adding the additional Ollama provider to the list of supported providers in the UI - Updating the model identification logic to properly detect these models - Modifying the model loading and runtime configuration to work with them - Wiring the provider through the embedding and chat model factories This change allows users to leverage more than one Ollama endpoint for both embeddings and conversational AI tasks.
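One runtime wrinkle, visible in the src/models/index.ts hunk below: before the generic ChatOpenAI client is built for one of these models, the base URL is passed through `urlRewriteRuntime`. A sketch of the branch (that the helper registers request rewrites so the extension's calls reach the local Ollama server is an assumption; its implementation isn't shown in this series):

import { isOllamaModel } from "@/db/models"
import { urlRewriteRuntime } from "@/libs/runtime"

// Pre-flight for OpenAI-compatible Ollama endpoints, as in the diff below.
const prepareCustomModel = async (model: string, baseUrl: string) => {
  if (isOllamaModel(model)) {
    // assumption: sets up URL/header rewrites for the local endpoint
    await urlRewriteRuntime(baseUrl || "")
  }
}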
--- src/components/Option/Settings/openai.tsx | 2 +- src/db/models.ts | 79 ++++++++++++++++++++++- src/models/embedding.ts | 2 +- src/models/index.ts | 7 +- src/utils/oai-api-providers.ts | 18 ++++-- 5 files changed, 96 insertions(+), 12 deletions(-) diff --git a/src/components/Option/Settings/openai.tsx b/src/components/Option/Settings/openai.tsx index fe4dcdb..a0807ed 100644 --- a/src/components/Option/Settings/openai.tsx +++ b/src/components/Option/Settings/openai.tsx @@ -47,7 +47,7 @@ export const OpenAIApp = () => { }) setOpen(false) message.success(t("addSuccess")) - const noPopupProvider = ["lmstudio", "llamafile"] + const noPopupProvider = ["lmstudio", "llamafile", "ollama2"] if (!noPopupProvider.includes(provider)) { setOpenaiId(data) setOpenModelModal(true) diff --git a/src/db/models.ts b/src/db/models.ts index eaa7650..012379c 100644 --- a/src/db/models.ts +++ b/src/db/models.ts @@ -25,6 +25,7 @@ export const removeModelSuffix = (id: string) => { .replace(/_model-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{3,4}-[a-f0-9]{4}/, "") .replace(/_lmstudio_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/, "") .replace(/_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/, "") + .replace(/_ollama2_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/, "") } export const isLMStudioModel = (model: string) => { const lmstudioModelRegex = @@ -37,7 +38,11 @@ export const isLlamafileModel = (model: string) => { /_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/ return llamafileModelRegex.test(model) } - +export const isOllamaModel = (model: string) => { + const ollamaModelRegex = + /_ollama2_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/ + return ollamaModelRegex.test(model) +} export const getLMStudioModelId = ( model: string ): { model_id: string; provider_id: string } => { @@ -51,7 +56,19 @@ export const getLMStudioModelId = ( } return null } - +export const getOllamaModelId = ( + model: string +): { model_id: string; provider_id: string } => { + const ollamaModelRegex = + /_ollama2_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/ + const match = model.match(ollamaModelRegex) + if (match) { + const modelId = match[0] + const providerId = match[0].replace("_ollama2_openai-", "") + return { model_id: modelId, provider_id: providerId } + } + return null +} export const getLlamafileModelId = ( model: string ): { model_id: string; provider_id: string } => { @@ -74,6 +91,10 @@ export const isCustomModel = (model: string) => { return true } + if (isOllamaModel(model)) { + return true + } + const customModelRegex = /_model-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{3,4}-[a-f0-9]{4}/ return customModelRegex.test(model) @@ -246,6 +267,25 @@ export const getModelInfo = async (id: string) => { } } + + if (isOllamaModel(id)) { + const ollamaId = getOllamaModelId(id) + if (!ollamaId) { + throw new Error("Invalid Ollama model ID") + } + return { + model_id: id.replace( + /_ollama2_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/, + "" + ), + provider_id: `openai-${ollamaId.provider_id}`, + name: id.replace( + /_ollama2_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/, + "" + ) + } + } + const model = await db.getById(id) return model } @@ -309,6 +349,27 @@ export const dynamicFetchLMStudio = async ({ return lmstudioModels } +export const dynamicFetchOllama2 = async ({ + baseUrl, + providerId +}: { + baseUrl: string + providerId: string +}) => { + const models = await getAllOpenAIModels(baseUrl) + const ollama2Models = models.map((e) => { + return { + name: e?.name || e?.id, + id: `${e?.id}_ollama2_${providerId}`, + provider: providerId, + lookup:
`${e?.id}_${providerId}`, + provider_id: providerId + } + }) + + return ollama2Models +} + export const dynamicFetchLlamafile = async ({ baseUrl, providerId @@ -360,21 +421,33 @@ export const ollamaFormatAllCustomModels = async ( }) ) + const ollamaModelsPromises = allProviders.map((provider) => ( + dynamicFetchOllama2({ + baseUrl: provider.baseUrl, + providerId: provider.id + }) + )) + const lmModelsFetch = await Promise.all(lmModelsPromises) const llamafileModelsFetch = await Promise.all(llamafileModelsPromises) + const ollamaModelsFetch = await Promise.all(ollamaModelsPromises) + const lmModels = lmModelsFetch.flat() const llamafileModels = llamafileModelsFetch.flat() + const ollama2Models = ollamaModelsFetch.flat() + // merge allModels and lmModels const allModlesWithLMStudio = [ ...(modelType !== "all" ? allModles.filter((model) => model.model_type === modelType) : allModles), ...lmModels, - ...llamafileModels + ...llamafileModels, + ...ollama2Models ] const ollamaModels = allModlesWithLMStudio.map((model) => { diff --git a/src/models/embedding.ts b/src/models/embedding.ts index 03eb663..0c1026b 100644 --- a/src/models/embedding.ts +++ b/src/models/embedding.ts @@ -1,4 +1,4 @@ -import { getModelInfo, isCustomModel } from "@/db/models" +import { getModelInfo, isCustomModel, isOllamaModel } from "@/db/models" import { OllamaEmbeddingsPageAssist } from "./OllamaEmbedding" import { OAIEmbedding } from "./OAIEmbedding" import { getOpenAIConfigById } from "@/db/openai" diff --git a/src/models/index.ts b/src/models/index.ts index 4798f78..135025f 100644 --- a/src/models/index.ts +++ b/src/models/index.ts @@ -1,8 +1,9 @@ -import { getModelInfo, isCustomModel } from "@/db/models" +import { getModelInfo, isCustomModel, isOllamaModel } from "@/db/models" import { ChatChromeAI } from "./ChatChromeAi" import { ChatOllama } from "./ChatOllama" import { getOpenAIConfigById } from "@/db/openai" import { ChatOpenAI } from "@langchain/openai" +import { urlRewriteRuntime } from "@/libs/runtime" export const pageAssistModel = async ({ model, @@ -43,6 +44,10 @@ export const pageAssistModel = async ({ const modelInfo = await getModelInfo(model) const providerInfo = await getOpenAIConfigById(modelInfo.provider_id) + if (isOllamaModel(model)) { + await urlRewriteRuntime(providerInfo.baseUrl || "") + } + return new ChatOpenAI({ modelName: modelInfo.model_id, openAIApiKey: providerInfo.apiKey || "temp", diff --git a/src/utils/oai-api-providers.ts b/src/utils/oai-api-providers.ts index 52bfcf9..40c1e21 100644 --- a/src/utils/oai-api-providers.ts +++ b/src/utils/oai-api-providers.ts @@ -1,14 +1,24 @@ export const OAI_API_PROVIDERS = [ + { + label: "Custom", + value: "custom", + baseUrl: "" + }, { label: "LM Studio", value: "lmstudio", baseUrl: "http://localhost:1234/v1" }, { - label: "LlamaFile", + label: "Llamafile", value: "llamafile", baseUrl: "http://127.0.0.1:8080/v1" }, + { + label: "Ollama", + value: "ollama2", + baseUrl: "http://localhost:11434/v1" + }, { label: "OpenAI", value: "openai", @@ -34,9 +44,5 @@ export const OAI_API_PROVIDERS = [ value: "openrouter", baseUrl: "https://openrouter.ai/api/v1" }, - { - label: "Custom", - value: "custom", - baseUrl: "" - } + ] \ No newline at end of file From 2409ebc75deb37ecd943b1aa63b8b859dcf669be Mon Sep 17 00:00:00 2001 From: n4ze3m Date: Sun, 10 Nov 2024 15:38:03 +0530 Subject: [PATCH 13/17] feat: Add Ollama and Llamafile to dynamic model fetching Expanded the list of providers for which models are fetched dynamically to include Ollama and Llamafile, removing 
the need for manual model addition in the user interface for these providers. This simplifies the user experience and ensures users always have access to the latest models without manual intervention. --- src/assets/locale/da/openai.json | 2 +- src/assets/locale/de/openai.json | 2 +- src/assets/locale/en/openai.json | 2 +- src/assets/locale/es/openai.json | 2 +- src/assets/locale/fa/openai.json | 2 +- src/assets/locale/fr/openai.json | 2 +- src/assets/locale/it/openai.json | 2 +- src/assets/locale/ja-JP/openai.json | 2 +- src/assets/locale/ko/openai.json | 2 +- src/assets/locale/ml/openai.json | 2 +- src/assets/locale/no/openai.json | 2 +- src/assets/locale/ru/openai.json | 2 +- src/assets/locale/zh/openai.json | 2 +- src/components/Option/Settings/openai.tsx | 8 +++++--- 14 files changed, 18 insertions(+), 16 deletions(-) diff --git a/src/assets/locale/da/openai.json b/src/assets/locale/da/openai.json index 3410ae6..8ef3c07 100644 --- a/src/assets/locale/da/openai.json +++ b/src/assets/locale/da/openai.json @@ -42,7 +42,7 @@ "delete": "Slet", "edit": "Rediger", "newModel": "Tilføj Modeller til Udbyder", - "noNewModel": "For LMStudio henter vi dynamisk. Ingen manuel tilføjelse nødvendig.", + "noNewModel": "For LMStudio, Ollama, Llamafile, henter vi dynamisk. Ingen manuel tilføjelse nødvendig.", "searchModel": "Søg Model", "selectAll": "Vælg Alle", "save": "Gem", diff --git a/src/assets/locale/de/openai.json b/src/assets/locale/de/openai.json index 23a684e..98e7519 100644 --- a/src/assets/locale/de/openai.json +++ b/src/assets/locale/de/openai.json @@ -42,7 +42,7 @@ "delete": "Löschen", "edit": "Bearbeiten", "newModel": "Modelle zum Anbieter hinzufügen", - "noNewModel": "Für LMStudio holen wir die Daten dynamisch. Keine manuelle Hinzufügung erforderlich.", + "noNewModel": "Für LMStudio, Ollama, Llamafile, holen wir die Daten dynamisch. Keine manuelle Hinzufügung erforderlich.", "searchModel": "Modell suchen", "selectAll": "Alle auswählen", "save": "Speichern", diff --git a/src/assets/locale/en/openai.json b/src/assets/locale/en/openai.json index c0838cb..f8ad298 100644 --- a/src/assets/locale/en/openai.json +++ b/src/assets/locale/en/openai.json @@ -42,7 +42,7 @@ "delete": "Delete", "edit": "Edit", "newModel": "Add Models to Provider", - "noNewModel": "For LMStudio, we fetch dynamically. No manual addition needed.", + "noNewModel": "For LMStudio, Ollama, Llamafile, we fetch dynamically. No manual addition needed.", "searchModel": "Search Model", "selectAll": "Select All", "save": "Save", diff --git a/src/assets/locale/es/openai.json b/src/assets/locale/es/openai.json index 2e84a19..b4bd9b2 100644 --- a/src/assets/locale/es/openai.json +++ b/src/assets/locale/es/openai.json @@ -42,7 +42,7 @@ "delete": "Eliminar", "edit": "Editar", "newModel": "Añadir Modelos al Proveedor", - "noNewModel": "Para LMStudio, obtenemos dinámicamente. No se necesita adición manual.", + "noNewModel": "Para LMStudio, Ollama, Llamafile, obtenemos dinámicamente. No se necesita adición manual.", "searchModel": "Buscar Modelo", "selectAll": "Seleccionar Todo", "save": "Guardar", diff --git a/src/assets/locale/fa/openai.json b/src/assets/locale/fa/openai.json index 479e509..7258e83 100644 --- a/src/assets/locale/fa/openai.json +++ b/src/assets/locale/fa/openai.json @@ -42,7 +42,7 @@ "delete": "حذف", "edit": "ویرایش", "newModel": "افزودن مدل‌ها به ارائه‌دهنده", - "noNewModel": "برای LMStudio، ما به صورت پویا دریافت می‌کنیم. 
نیازی به افزودن دستی نیست.", + "noNewModel": "برای LMStudio، Ollama و Llamafile، ما به صورت پویا دریافت می‌کنیم. نیازی به افزودن دستی نیست.", "searchModel": "جستجوی مدل", "selectAll": "انتخاب همه", "save": "ذخیره", diff --git a/src/assets/locale/fr/openai.json b/src/assets/locale/fr/openai.json index a71256c..2d9a0c2 100644 --- a/src/assets/locale/fr/openai.json +++ b/src/assets/locale/fr/openai.json @@ -42,7 +42,7 @@ "delete": "Supprimer", "edit": "Modifier", "newModel": "Ajouter des modèles au fournisseur", - "noNewModel": "Pour LMStudio, nous récupérons dynamiquement. Pas besoin d'ajout manuel.", + "noNewModel": "Pour LMStudio, Ollama, Llamafile, nous récupérons dynamiquement. Pas besoin d'ajout manuel.", "searchModel": "Rechercher un modèle", "selectAll": "Tout sélectionner", "save": "Enregistrer", diff --git a/src/assets/locale/it/openai.json b/src/assets/locale/it/openai.json index c9d27ef..589da51 100644 --- a/src/assets/locale/it/openai.json +++ b/src/assets/locale/it/openai.json @@ -42,7 +42,7 @@ "delete": "Elimina", "edit": "Modifica", "newModel": "Aggiungi Modelli al Provider", - "noNewModel": "Per LMStudio, recuperiamo dinamicamente. Non è necessaria l'aggiunta manuale.", + "noNewModel": "Per LMStudio, Ollama, Llamafile, recuperiamo dinamicamente. Non è necessaria l'aggiunta manuale.", "searchModel": "Cerca Modello", "selectAll": "Seleziona Tutto", "save": "Salva", diff --git a/src/assets/locale/ja-JP/openai.json b/src/assets/locale/ja-JP/openai.json index 829e886..2e755e8 100644 --- a/src/assets/locale/ja-JP/openai.json +++ b/src/assets/locale/ja-JP/openai.json @@ -42,7 +42,7 @@ "delete": "削除", "edit": "編集", "newModel": "プロバイダーにモデルを追加", - "noNewModel": "LMStudioの場合、動的に取得します。手動での追加は不要です。", + "noNewModel": "LMStudio、Ollama、Llamafileの場合、動的に取得します。手動での追加は不要です。", "searchModel": "モデルを検索", "selectAll": "すべて選択", "save": "保存", diff --git a/src/assets/locale/ko/openai.json b/src/assets/locale/ko/openai.json index 13bc431..fae6aca 100644 --- a/src/assets/locale/ko/openai.json +++ b/src/assets/locale/ko/openai.json @@ -42,7 +42,7 @@ "delete": "삭제", "edit": "편집", "newModel": "공급자에 모델 추가", - "noNewModel": "LMStudio의 경우 동적으로 가져옵니다. 수동 추가는 필요하지 않습니다.", + "noNewModel": "LMStudio, Ollama, Llamafile의 경우 동적으로 가져옵니다. 수동 추가는 필요하지 않습니다.", "searchModel": "모델 검색", "selectAll": "모두 선택", "save": "저장", diff --git a/src/assets/locale/ml/openai.json b/src/assets/locale/ml/openai.json index 6fe486e..5bbaaea 100644 --- a/src/assets/locale/ml/openai.json +++ b/src/assets/locale/ml/openai.json @@ -42,7 +42,7 @@ "delete": "ഇല്ലാതാക്കുക", "edit": "തിരുത്തുക", "newModel": "ദാതാവിലേക്ക് മോഡലുകൾ ചേർക്കുക", - "noNewModel": "LMStudio-യ്ക്കായി, ഞങ്ങൾ ഡൈനാമിക്കായി ലഭ്യമാക്കുന്നു. മാനുവലായി ചേർക്കേണ്ടതില്ല.", + "noNewModel": "LMStudio, Ollama, Llamafile-യ്ക്കായി, ഞങ്ങൾ ഡൈനാമിക്കായി ലഭ്യമാക്കുന്നു. മാനുവലായി ചേർക്കേണ്ടതില്ല.", "searchModel": "മോഡൽ തിരയുക", "selectAll": "എല്ലാം തിരഞ്ഞെടുക്കുക", "save": "സംരക്ഷിക്കുക", diff --git a/src/assets/locale/no/openai.json b/src/assets/locale/no/openai.json index 91ddc3d..aa748bb 100644 --- a/src/assets/locale/no/openai.json +++ b/src/assets/locale/no/openai.json @@ -42,7 +42,7 @@ "delete": "Slett", "edit": "Rediger", "newModel": "Legg til modeller for leverandør", - "noNewModel": "For LMStudio henter vi dynamisk. Ingen manuell tillegging nødvendig.", + "noNewModel": "For LMStudio, Ollama og Llamafile henter vi dynamisk."
Ingen manuell tillegging nødvendig.", "searchModel": "Søk etter modell", "selectAll": "Velg alle", "save": "Lagre", diff --git a/src/assets/locale/ru/openai.json b/src/assets/locale/ru/openai.json index 436c676..2b63545 100644 --- a/src/assets/locale/ru/openai.json +++ b/src/assets/locale/ru/openai.json @@ -42,7 +42,7 @@ "delete": "Удалить", "edit": "Редактировать", "newModel": "Добавить модели к провайдеру", - "noNewModel": "Для LMStudio мы загружаем динамически. Ручное добавление не требуется.", + "noNewModel": "Для LMStudio, Ollama и Llamafile мы загружаем динамически. Ручное добавление не требуется.", "searchModel": "Поиск модели", "selectAll": "Выбрать все", "save": "Сохранить", diff --git a/src/assets/locale/zh/openai.json b/src/assets/locale/zh/openai.json index 1b02184..00583f3 100644 --- a/src/assets/locale/zh/openai.json +++ b/src/assets/locale/zh/openai.json @@ -42,7 +42,7 @@ "delete": "删除", "edit": "编辑", "newModel": "向提供商添加模型", - "noNewModel": "对于 LMStudio,我们动态获取。无需手动添加。", + "noNewModel": "对于 LMStudio、Ollama、Llamafile,我们动态获取。无需手动添加。", "searchModel": "搜索模型", "selectAll": "全选", "save": "保存", diff --git a/src/components/Option/Settings/openai.tsx b/src/components/Option/Settings/openai.tsx index a0807ed..b44694f 100644 --- a/src/components/Option/Settings/openai.tsx +++ b/src/components/Option/Settings/openai.tsx @@ -23,6 +23,7 @@ } from "lucide-react" import { OpenAIFetchModel } from "./openai-fetch-model" import { OAI_API_PROVIDERS } from "@/utils/oai-api-providers" +const noPopupProvider = ["lmstudio", "llamafile", "ollama2"] export const OpenAIApp = () => { const { t } = useTranslation("openai") @@ -47,7 +48,6 @@ }) setOpen(false) message.success(t("addSuccess")) - const noPopupProvider = ["lmstudio", "llamafile", "ollama2"] if (!noPopupProvider.includes(provider)) { setOpenaiId(data) setOpenModelModal(true) @@ -158,7 +158,7 @@ @@ -168,7 +168,9 @@ setOpenModelModal(true) setOpenaiId(record.id) }} - disabled={!record.id || record.provider === "lmstudio"}> + disabled={ + !record.id || noPopupProvider.includes(record.provider) + }> From 55f3838b6de58aab5b20cc6fe6adf63723ea01af Mon Sep 17 00:00:00 2001 From: n4ze3m Date: Sun, 10 Nov 2024 17:30:33 +0530 Subject: [PATCH 14/17] feat: Improve ollama2 model fetching Previously, the ollama2 model fetch ran against every configured provider. It is now restricted to providers actually registered as "ollama2", so model lists are only requested from endpoints that can serve them.
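Condensed, the change is a filter before the fan-out (`allProviders` and `dynamicFetchOllama2` as in src/db/models.ts):

// before (patch 12): every configured provider was queried for ollama2 models
const buggy = allProviders.map((p) =>
  dynamicFetchOllama2({ baseUrl: p.baseUrl, providerId: p.id })
)

// after (this patch): only providers registered as "ollama2" are queried
const ollamaProviders = allProviders.filter((p) => p.provider === "ollama2")
const fixed = ollamaProviders.map((p) =>
  dynamicFetchOllama2({ baseUrl: p.baseUrl, providerId: p.id })
)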
--- src/db/models.ts | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/db/models.ts b/src/db/models.ts index 012379c..928beb9 100644 --- a/src/db/models.ts +++ b/src/db/models.ts @@ -407,6 +407,10 @@ export const ollamaFormatAllCustomModels = async ( (provider) => provider.provider === "llamafile" ) + const ollamaProviders = allProviders.filter( + (provider) => provider.provider === "ollama2" + ) + const lmModelsPromises = lmstudioProviders.map((provider) => dynamicFetchLMStudio({ baseUrl: provider.baseUrl, @@ -421,12 +425,11 @@ }) ) - const ollamaModelsPromises = allProviders.map((provider) => ( + const ollamaModelsPromises = ollamaProviders.map((provider) => dynamicFetchOllama2({ baseUrl: provider.baseUrl, providerId: provider.id - }) - )) + })) const lmModelsFetch = await Promise.all(lmModelsPromises) From 0f75de02cbadbb5623176276528894e71aa01344 Mon Sep 17 00:00:00 2001 From: n4ze3m Date: Sun, 10 Nov 2024 18:08:05 +0530 Subject: [PATCH 15/17] Fix: Update temporary chat history This commit addresses an issue where temporary chat history was not being updated correctly when using voice input. The sentinel history ID ("temp") is now set in both `saveMessageOnSuccess` and `saveMessageOnError`, and failed turns are appended to the in-memory history, so the temporary conversation stays consistent. --- src/hooks/useMessageOption.tsx | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/src/hooks/useMessageOption.tsx b/src/hooks/useMessageOption.tsx index 90fc06a..322287c 100644 --- a/src/hooks/useMessageOption.tsx +++ b/src/hooks/useMessageOption.tsx @@ -366,6 +366,8 @@ export const useMessageOption = () => { const saveMessageOnSuccess = async (e: any) => { if (!temporaryChat) { return await saveSuccess(e) + } else { + setHistoryId("temp") } return true @@ -374,6 +376,21 @@ const saveMessageOnError = async (e: any) => { if (!temporaryChat) { return await saveError(e) + } else { + setHistory([ + ...history, + { + role: "user", + content: e.userMessage, + image: e.image + }, + { + role: "assistant", + content: e.botMessage + } + ]) + + setHistoryId("temp") } return true @@ -1069,6 +1086,6 @@ ... setSelectedKnowledge, ttsEnabled, temporaryChat, - setTemporaryChat, + setTemporaryChat } } From a96193bbf804e8bb8dadfbfa9372cccbddc1b5a4 Mon Sep 17 00:00:00 2001 From: n4ze3m Date: Sun, 10 Nov 2024 18:14:44 +0530 Subject: [PATCH 16/17] feat: Add error handling for updating message by index Adds error handling to the `updateMessageByIndex` function so that editing a message in a temporary chat, which has no persisted history row, no longer throws and breaks the session.
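The guard amounts to the following: temporary chats use the sentinel history id "temp", which has no row in storage, so the read-modify-write can throw, and the error is swallowed rather than surfaced to the UI. Condensed from the hunk below (`db`, `history_id`, `index`, and `message` as in src/db/index.ts):

try {
  const chatHistory = (await db.getChatHistory(history_id)).reverse()
  chatHistory[index].content = message           // edit the targeted turn
  await db.db.set({ [history_id]: chatHistory.reverse() })
} catch (e) {
  // nothing persisted for temporary chats; keep the in-memory copy as-is
}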
--- src/db/index.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/db/index.ts b/src/db/index.ts index e4c136c..da837b2 100644 --- a/src/db/index.ts +++ b/src/db/index.ts @@ -355,10 +355,14 @@ export const updateMessageByIndex = async ( index: number, message: string ) => { + try { const db = new PageAssitDatabase() const chatHistory = (await db.getChatHistory(history_id)).reverse() chatHistory[index].content = message await db.db.set({ [history_id]: chatHistory.reverse() }) + } catch (e) { + // swallow the error: temporary chats have no persisted history to update + } } export const deleteChatForEdit = async (history_id: string, index: number) => { From 5678a0f8b288568ec782a4c09bfa6ea71c33dde3 Mon Sep 17 00:00:00 2001 From: n4ze3m Date: Sun, 10 Nov 2024 19:34:38 +0530 Subject: [PATCH 17/17] feat: Add Korean language support Add Korean language support to the application. This includes translating the necessary UI elements and adding Korean to the supported language list. --- src/i18n/index.ts | 4 +++- src/i18n/lang/ko.ts | 19 +++++++++++++++++++ src/i18n/support-language.ts | 4 ++++ 3 files changed, 26 insertions(+), 1 deletion(-) create mode 100644 src/i18n/lang/ko.ts diff --git a/src/i18n/index.ts b/src/i18n/index.ts index 97bbf6f..effddb4 100644 --- a/src/i18n/index.ts +++ b/src/i18n/index.ts @@ -14,6 +14,7 @@ import { de } from "./lang/de"; import { da } from "./lang/da"; import { no } from "./lang/no"; import { sv } from "./lang/sv"; +import { ko } from "./lang/ko"; i18n @@ -37,7 +38,8 @@ da: da, no: no, de: de, - sv: sv + sv: sv, + ko: ko, }, fallbackLng: "en", lng: localStorage.getItem("i18nextLng") || "en", diff --git a/src/i18n/lang/ko.ts b/src/i18n/lang/ko.ts new file mode 100644 index 0000000..037ce8f --- /dev/null +++ b/src/i18n/lang/ko.ts @@ -0,0 +1,19 @@ +import option from "@/assets/locale/ko/option.json"; +import playground from "@/assets/locale/ko/playground.json"; +import common from "@/assets/locale/ko/common.json"; +import sidepanel from "@/assets/locale/ko/sidepanel.json"; +import settings from "@/assets/locale/ko/settings.json"; +import knowledge from "@/assets/locale/ko/knowledge.json"; +import chrome from "@/assets/locale/ko/chrome.json"; +import openai from "@/assets/locale/ko/openai.json"; + +export const ko = { + option, + playground, + common, + sidepanel, + settings, + knowledge, + chrome, + openai +} \ No newline at end of file diff --git a/src/i18n/support-language.ts b/src/i18n/support-language.ts index 2c62ad4..273964d 100644 --- a/src/i18n/support-language.ts +++ b/src/i18n/support-language.ts @@ -55,5 +55,9 @@ export const supportLanguage = [ { value: "sv", label: "Svenska" + }, + { + value: "ko", + label: "한국어" } ]
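With the bundle registered, exercising the new locale is the standard i18next flow. A quick sanity check might look like the sketch below (it assumes src/i18n default-exports the configured instance and that the JSON files above register as namespaces; `changeLanguage` and the "ns:key" form of `t` are standard i18next APIs):

import i18n from "@/i18n"

const demoKoreanLocale = async () => {
  await i18n.changeLanguage("ko")              // switch the UI language
  console.log(i18n.t("option:temporaryChat"))  // "임시 채팅", per ko/option.json
}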