Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Developed a chatbot using the Hugging Face API with the pretrained Stable Diffusion model, and enhanced the UI by adding more animations and react-icons #42

Open
wants to merge 8 commits into
base: main
Choose a base branch
from
1,287 changes: 1,287 additions & 0 deletions examples/huggingface/Copy_of_DreamBooth_Stable_Diffusion.ipynb

Large diffs are not rendered by default.

18 changes: 11 additions & 7 deletions examples/huggingface/main.py
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Remove your Hugging Face API key from the code. It's not safe.

Original file line number Diff line number Diff line change
@@ -1,17 +1,21 @@
import textbase

import textbase
from textbase.message import Message
from textbase import models
import os
from typing import List
import transformers

#load your HuggingFace API key
models.HuggingFace.api_key = ""
# or from environment variable:
# Load your HuggingFace API key
models.HuggingFace.api_key = "<YOUR_HUGGING_FACE_API_KEY>"  # redacted: never commit real API tokens
# or load from an environment variable:
# models.HuggingFace.api_key = os.getenv("HUGGING_FACE_API_KEY")

# Prompt for the model
SYSTEM_PROMPT = """you are and expert in large language model (llm) field and you will answer accordingly"""
SYSTEM_PROMPT = """you are an expert in the large language model (LLM) field and you will answer accordingly"""

# Specify the Hugging Face model name
model_name = "runwayml/stable-diffusion-v1-5"

@textbase.chatbot("talking-bot")
def on_message(message_history: List[Message], state: dict = None):
Expand All @@ -27,11 +31,11 @@ def on_message(message_history: List[Message], state: dict = None):
else:
state["counter"] += 1

#Generate Hugging face model response
# Generate Hugging Face model response
bot_response = models.HuggingFace.generate(
system_prompt=SYSTEM_PROMPT,
message_history=message_history,
model="jasondubon/HubermanGPT-small-v1"
model=model_name
)

return bot_response, state
50 changes: 43 additions & 7 deletions main.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,14 @@
from textbase import models
import os
from typing import List
import json
import requests


# Load your HuggingFace API key
models.HuggingFace.api_key = "<YOUR_HUGGING_FACE_API_KEY>"  # redacted: never commit real API tokens
# or load from an environment variable:
# models.HuggingFace.api_key = os.getenv("HUGGING_FACE_API_KEY")

# Load your OpenAI API key
models.OpenAI.api_key = os.getenv("OPENAI_API_KEY")
Expand All @@ -13,6 +21,8 @@
SYSTEM_PROMPT = """You are chatting with an AI. There are no specific prefixes for responses, so you can ask or talk about anything you like. The AI will respond in a natural, conversational manner. Feel free to start the conversation with any question or topic, and let's have a pleasant chat!
"""

# Prompt for the model
SYSTEM_PROMPT = """you are an expert in the large language model (LLM) field and you will answer accordingly"""

@textbase.chatbot("talking-bot")
def on_message(message_history: List[Message], state: dict = None):
Expand All @@ -28,11 +38,37 @@ def on_message(message_history: List[Message], state: dict = None):
else:
state["counter"] += 1

# # Generate GPT-3.5 Turbo response
bot_response = models.OpenAI.generate(
system_prompt=SYSTEM_PROMPT,
message_history=message_history,
model="gpt-3.5-turbo",
)
try:
assert models.HuggingFace.api_key is not None, "Hugging Face API key is not set"

headers = {"Authorization": f"Bearer {models.HuggingFace.api_key}"}
API_URL = "https://api-inference.huggingface.co/models/gpt-3.5-turbo"

inputs = {
"past_user_inputs": [SYSTEM_PROMPT],
"generated_responses": [],
"text": ""
}

for message in message_history:
if message.role == "user":
inputs["past_user_inputs"].append(message.content)
else:
inputs["generated_responses"].append(message.content)

inputs["text"] = inputs["past_user_inputs"].pop(-1)
payload = {
"inputs": inputs,
"max_length": 3000,
"temperature": 0.7,
}
data = json.dumps(payload)
response = requests.request("POST", API_URL, headers=headers, data=data)
response = json.loads(response.content.decode("utf-8"))

bot_response = response["generated_text"]
return bot_response, state

return bot_response, state
except Exception as ex:
error_message = f"Error: {ex}"
return error_message, state
43 changes: 2 additions & 41 deletions textbase/backend.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
# textbase/backend.py
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from fastapi.responses import HTMLResponse
Expand All @@ -15,8 +14,6 @@

load_dotenv()

from .message import Message

app = FastAPI()

origins = [
Expand All @@ -34,65 +31,31 @@
allow_headers=["*"],
)


@app.get("/", response_class=HTMLResponse)
async def read_root():
"""
The `read_root` function reads and returns the contents of an HTML file specified by the path
"textbase/frontend/index.html".
:return: The content of the "index.html" file located in the "textbase/frontend" directory is being
returned.
"""
with open("textbase/frontend/dist/index.html") as f:
return f.read()


def get_module_from_file_path(file_path: str):
"""
The function `get_module_from_file_path` takes a file path as input, loads the module from the file,
and returns the module.

:param file_path: The file path is the path to the Python file that you want to import as a module.
It should be a string representing the absolute or relative path to the file
:type file_path: str
:return: the module that is loaded from the given file path.
"""
module_name = os.path.splitext(os.path.basename(file_path))[0]
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
return module

# Set the FILE_PATH environment variable to the location of your chatbot logic module
os.environ["FILE_PATH"] = "textbase\examples\huggingface\main.py"

@app.post("/chat", response_model=dict)
async def chat(messages: List[Message], state: dict = None):
"""
The above function is a Python API endpoint that receives a list of messages and a state dictionary,
loads a module from a file path, calls the on_message function from the module with the messages and
state, and returns the bot messages generated by the module.

:param messages: The `messages` parameter is a list of `Message` objects. It represents the messages
exchanged between the user and the chatbot. Each `Message` object typically contains information
such as the text of the message, the sender, the timestamp, etc
:type messages: List[Message]
:param state: The `state` parameter is a dictionary that stores the state of the conversation. It
can be used to store information or context that needs to be maintained across multiple requests or
messages in the conversation. The `state` parameter is optional and can be set to `None` if not
needed
:type state: dict
:return: a list of `Message` objects.
"""
file_path = os.environ.get("FILE_PATH", None)
logging.info(file_path)
if not file_path:
return []

module = get_module_from_file_path(file_path)

print("here", state)

# Call the on_message function from the dynamically loaded module
response = module.on_message(messages, state)
if type(response) is tuple:
bot_response, new_state = response
Expand All @@ -103,8 +66,6 @@ async def chat(messages: List[Message], state: dict = None):
elif type(response) is str:
return {"botResponse": {"content": response, "role": "assistant"}}


# Mount the static directory (frontend files)
app.mount(
"/assets",
StaticFiles(directory="textbase/frontend/dist/assets", html=True),
Expand Down
36 changes: 27 additions & 9 deletions textbase/frontend/src/App.css
Original file line number Diff line number Diff line change
Expand Up @@ -5,15 +5,7 @@
text-align: center;
}

.logo {
height: 6em;
padding: 1.5em;
will-change: filter;
transition: filter 300ms;
}
.logo:hover {
filter: drop-shadow(0 0 2em #646cffaa);
}

.logo.react:hover {
filter: drop-shadow(0 0 2em #61dafbaa);
}
Expand All @@ -26,6 +18,32 @@
transform: rotate(360deg);
}
}
@import "~react-icons/fa";
.icon-sun {
font-size: 24px;
margin-right: 8px;
}

.icon-moon {
font-size: 24px;
margin-right: 8px;
}

.icon-plane {
font-size: 20px;
}

.icon-star {
font-size: 20px;
color: gold;
margin-left: 8px;
}
.icon-plane,
.icon-star {
font-size: 1.2rem;
margin-right: 8px;
color: #333;
}

@media (prefers-reduced-motion: no-preference) {
a:nth-of-type(2) .logo {
Expand Down
118 changes: 111 additions & 7 deletions textbase/frontend/src/App.tsx
Original file line number Diff line number Diff line change
@@ -1,13 +1,117 @@
// App.tsx
import { ThemeProvider } from './components/ThemeContext';
import Chatbot from './components/ChatBot';

import React, { useState, useRef, useEffect } from "react";
import ReactMarkdown from "react-markdown";
import remarkGfm from "remark-gfm";
//import { FaSun, FaMoon, FaPaperPlane, FaStar } from "react-icons/fa";
import {GoStar} from "react-icons/go";
import {GoStarFill} from "react-icons/go";
import "./App.css";

type Message = {
content: string;
role: "user" | "assistant";
};

function ChatMessage(props: { message: Message }) {
return (
<div className={`chat-message ${props.message.role}`}>
<div className="message-content">
<ReactMarkdown className="prose" remarkPlugins={[remarkGfm]}>
{props.message.content}
</ReactMarkdown>
</div>
</div>
);
}

function App() {
const [input, setInput] = useState<string>("");
const [botState, setBotState] = useState<object>({});
const [history, setHistory] = useState<Message[]>([]);
const [darkMode, setDarkMode] = useState<boolean>(false);

const chatEndRef = useRef<HTMLDivElement>(null);

useEffect(() => {
if (chatEndRef.current) {
chatEndRef.current.scrollIntoView({ behavior: "smooth" });
}
}, [history]);

async function chatRequest(history: Message[], botState: object) {
try {
const response = await fetch("http://localhost:4000/chat", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({ messages: history, state: botState }),
});
const content: { botResponse: Message; newState: object } =
await response.json();
setHistory([...history, content.botResponse]);
setBotState(content.newState);
} catch (error) {
console.error("Failed to send chat history:", error);
}
}

function chatInputHandler() {
if (!input) {
return;
}
const newMessage: Message = {
content: input,
role: "user",
};
setHistory([...history, newMessage]);
setInput("");
chatRequest([...history, newMessage], botState);
}

return (
<ThemeProvider>

<Chatbot />
</ThemeProvider>
<div className={`app ${darkMode ? "dark" : "light"}`}>
<div className="chat-container">
<div className="chat-history">
{history.map((message, idx) => (
<ChatMessage key={idx} message={message} />
))}
<div ref={chatEndRef}></div>
</div>
<div className={`chat-input ${darkMode ? "dark" : "light"}`}>
<input
type="text"
placeholder="Type your message..."
value={input}
onChange={(e: React.ChangeEvent<HTMLInputElement>) => {
setInput(e.target.value);
}}
onKeyDown={(e: React.KeyboardEvent<HTMLInputElement>) => {
if (e.key === "Enter") {
chatInputHandler();
}
}}
/>
<button
className={`send-button ${darkMode ? "dark" : "light"}`}
onClick={chatInputHandler}
>
<GoStarFill className="icon-moon" />
</button>
<div className="star-icon">
<GoStar className="icon-star" />
</div>
</div>
</div>
<button
className={`toggle-theme-button ${darkMode ? "dark" : "light"}`}
onClick={() => {
setDarkMode((prev) => !prev);
}}
>
{darkMode ? <GoStar className="icon-sun" /> : <GoStarFill className="icon-moon" />}
</button>
</div>
);
}

Expand Down
Loading