streamlit_app.py
# Import the required libraries.
import numpy as np
import cv2
import streamlit as st
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array
from streamlit_webrtc import webrtc_streamer, VideoTransformerBase
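# NOTE (assumption): 'model_78.h5' and 'model_weights_78.h5' are expected in the
# working directory. The classifier appears to have been trained on 48x48
# grayscale face crops, which is why each detected face is resized to 48x48
# before prediction below.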
# Define the emotion labels in the order the model predicts them.
emotion_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']

# Load the trained model.
classifier = load_model('model_78.h5')

# Load the weights into the model.
classifier.load_weights("model_weights_78.h5")

# Load the face detector using OpenCV's bundled Haar cascade.
try:
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
except Exception:
    st.write("Error loading cascade classifiers")
class VideoTransformer(VideoTransformerBase):
    def transform(self, frame):
        img = frame.to_ndarray(format="bgr24")

        # Convert the frame to grayscale for face detection.
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(
            image=img_gray, scaleFactor=1.3, minNeighbors=5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img=img, pt1=(x, y), pt2=(
                x + w, y + h), color=(0, 255, 255), thickness=2)
            roi_gray = img_gray[y:y + h, x:x + w]
            roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
            if np.sum([roi_gray]) != 0:
                # Normalise the face crop and predict its emotion.
                roi = roi_gray.astype('float') / 255.0
                roi = img_to_array(roi)
                roi = np.expand_dims(roi, axis=0)
                prediction = classifier.predict(roi)[0]
                maxindex = int(np.argmax(prediction))
                finalout = emotion_labels[maxindex]
                output = str(finalout)

                # Draw the predicted label above the bounding box.
                label_position = (x, y - 10)
                cv2.putText(img, output, label_position, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        return img
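# The Streamlit UI below uses a sidebar selectbox to switch between a Home page,
# the live webcam emotion detection page (which starts the WebRTC stream), and
# an About page.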
def main():
    # Face Analysis Application #
    st.title("Real Time Face Emotion Detection Application 😠🤮😨😀😐😔😮")
    activities = ["Home", "Live Face Emotion Detection", "About"]
    choice = st.sidebar.selectbox("Select Activity", activities)
    st.sidebar.markdown(
        """ Developed by Anish Johnson
[LinkedIn](https://www.linkedin.com/in/anish-johnson-594110208/)""")

    # Homepage.
    if choice == "Home":
        html_temp_home1 = """<div style="background-color:#FC4C02;padding:0.5px">
<h4 style="color:white;text-align:center;">
Start Your Real Time Face Emotion Detection.
</h4>
</div>
<br>"""
        st.markdown(html_temp_home1, unsafe_allow_html=True)
        st.write("""
* An average person spends about 10 to 15 hours a day staring at a computer screen, during which our facial expressions keep changing.
* Sometimes we laugh, sometimes we cry, sometimes we get angry, and sometimes we are startled by our own face when the camera turns on accidentally.
* But have you ever wondered whether the computer we give all this attention to can even recognize these emotions?

Let's find out...
1. Click the dropdown list in the top left corner and select Live Face Emotion Detection.
2. This takes you to a page that will tell you whether it recognizes your emotions.
""")
    # Live Face Emotion Detection.
    elif choice == "Live Face Emotion Detection":
        st.header("Webcam Live Feed")
        st.subheader('''
Welcome to the other side of the SCREEN!!!
* Get ready with all the emotions you can express.
''')
        st.write("1. Click Start to open your camera and grant permission for prediction.")
        st.write("2. The app will predict your emotion in real time.")
        st.write("3. When you are done, click Stop to end the stream.")
        webrtc_streamer(key="example", video_processor_factory=VideoTransformer)
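        # NOTE (assumption): when deployed behind a restrictive network,
        # webrtc_streamer may also need an rtc_configuration with a STUN/TURN
        # server; the default configuration is usually enough for local use.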
    # About.
    elif choice == "About":
        st.subheader("About this app")
        html_temp_about1 = """<div style="background-color:#36454F;padding:30px">
<h4 style="color:white;">
This app predicts facial emotions using a convolutional neural network
built with the Keras and TensorFlow libraries.
Face detection is achieved through OpenCV.
</h4>
</div>
<br>
"""
        st.markdown(html_temp_about1, unsafe_allow_html=True)

    else:
        pass
if __name__ == "__main__":
    main()