diff --git a/.github/workflows/docker-develop.yaml b/.github/workflows/docker-develop.yaml
new file mode 100644
index 0000000..f9c3949
--- /dev/null
+++ b/.github/workflows/docker-develop.yaml
@@ -0,0 +1,37 @@
+# This file was automatically generated.
+# Please, do not edit it manually.
+
+name: Build Docker Image develop
+
+on:
+  release:
+    types:
+      - prereleased
+
+permissions:
+  id-token: write
+  contents: read
+
+jobs:
+  build:
+    environment: develop
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: 'Az CLI login'
+        uses: azure/login@v1
+        with:
+          client-id: ${{ secrets.AZURE_CLIENT_ID }}
+          tenant-id: ${{ secrets.AZURE_TENANT_ID }}
+          subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+
+      - name: 'Login to ACR'
+        run: |
+          az acr login --name ${{ vars.DOCKER_REGISTRY }}
+
+      - name: 'Build & push image'
+        run: |
+          docker build -t ${{ vars.DOCKER_REGISTRY }}/face-recognition:develop .
+          docker push ${{ vars.DOCKER_REGISTRY }}/face-recognition:develop
diff --git a/.github/workflows/docker-production.yaml b/.github/workflows/docker-production.yaml
new file mode 100644
index 0000000..dfa95a8
--- /dev/null
+++ b/.github/workflows/docker-production.yaml
@@ -0,0 +1,37 @@
+# This file was automatically generated.
+# Please, do not edit it manually.
+
+name: Build Docker Image production
+
+on:
+  release:
+    types:
+      - released
+
+permissions:
+  id-token: write
+  contents: read
+
+jobs:
+  build:
+    environment: production
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: 'Az CLI login'
+        uses: azure/login@v1
+        with:
+          client-id: ${{ secrets.AZURE_CLIENT_ID }}
+          tenant-id: ${{ secrets.AZURE_TENANT_ID }}
+          subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+
+      - name: 'Login to ACR'
+        run: |
+          az acr login --name ${{ vars.DOCKER_REGISTRY }}
+
+      - name: 'Build & push image'
+        run: |
+          docker build -t ${{ vars.DOCKER_REGISTRY }}/face-recognition:production .
+          docker push ${{ vars.DOCKER_REGISTRY }}/face-recognition:production
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..5254f04
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,23 @@
+# Copyright 2021-2024 Avaiga Private Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+
+FROM python:3.11
+
+WORKDIR /app
+
+# Install application dependencies.
+COPY src/requirements.txt .
+RUN pip install -r requirements.txt
+
+# Copy the application source code.
+COPY src .
+
+CMD ["taipy", "run", "--no-debug", "--no-reloader", "main.py", "-H", "0.0.0.0", "-P", "5000"]
diff --git a/README.md b/README.md
index 50750dc..44091fe 100644
--- a/README.md
+++ b/README.md
@@ -8,6 +8,9 @@ What this application demonstrates:
 * How to build a complex custom UI component for Taipy.
 * How to detect and recognize faces in the image in real time using OpenCV.
 
+## Usage
+
+A Webcam component is shown at the center of the application. It can detect faces and identify them.
 To be able to recognize your face, capture multiple photos of your face (click on the "Capture" button) and write your name when the dialog appears. At the end, click "Retrain" and the model should now recognize your face.
 
 ## Demo Type
@@ -24,14 +27,15 @@ Check out our [`GETTING_STARTED.md`](docs/GETTING_STARTED.md) file.
 
 ## Directory Structure
 
-- `main.py`: Main file containing the demo application code.
-- `demo/`: Contains additional demo source code.
-  - `demo/faces.py`: Contains the code to do face detection and face recognition.
-  - `src/image.py`: Contains shared facility functions.
-- `webcam/`: Contains custom component code. The directory contains the Python files to declare the custom component to Taipy.
-  - `webcam/webui`: Contains the TypeScript source code for the custom React component.
-- `classifiers`: Contains the OpenCV classifiers used in the app for face detection.
-- `images`: Contains the files to train the face detection of the demo. This folder is created at first startup. All image captures will go into this directory.
+- `src/`: Main folder for the application code
+  - `main.py`: Main file containing the demo application code.
+  - `demo/`: Contains additional demo source code.
+    - `demo/faces.py`: Contains the code to do face detection and face recognition.
+    - `src/image.py`: Contains shared facility functions.
+  - `webcam/`: Contains custom component code. The directory contains the Python files to declare the custom component to Taipy.
+    - `webcam/webui`: Contains the TypeScript source code for the custom React component.
+  - `classifiers`: Contains the OpenCV classifiers used in the app for face detection.
+  - `images`: Contains the files to train the face detection of the demo. This folder is created at first startup. All image captures will go into this directory.
 - `docs/`: contains the images for the documentation
 - `CODE_OF_CONDUCT.md`: Code of conduct for members and contributors of _demo-covid-dashboard_.
 - `CONTRIBUTING.md`: Instructions to contribute to _demo-covid-dashboard_.
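Note for reviewers: the directory structure above points at `demo/faces.py` for detection and recognition and at the `classifiers` and `images` folders, but that code is not part of this diff. As a rough illustration only (not the repository's actual implementation), a Haar-cascade detector plus an LBPH recognizer from `opencv-contrib` — which the installation steps below install — typically looks like the sketch that follows; the cascade filename and the `images/<person>/*.png` layout are assumptions.

```python
# Illustrative sketch only -- not the code in demo/faces.py.
# Assumed layout: a Haar cascade XML in classifiers/ and captured photos
# stored as images/<person_name>/<capture>.png.
from pathlib import Path

import cv2
import numpy as np

detector = cv2.CascadeClassifier("classifiers/haarcascade_frontalface_default.xml")
recognizer = cv2.face.LBPHFaceRecognizer_create()  # requires opencv-contrib


def detect(frame):
    """Return face rectangles (x, y, w, h) found in a BGR frame."""
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)


def train(training_data_folder=Path("images")):
    """Train the recognizer on every captured photo, one label per person."""
    faces, labels, names = [], [], []
    for label, person_dir in enumerate(sorted(training_data_folder.iterdir())):
        names.append(person_dir.name)
        for photo in person_dir.glob("*.png"):
            faces.append(cv2.imread(str(photo), cv2.IMREAD_GRAYSCALE))
            labels.append(label)
    recognizer.train(faces, np.array(labels))
    return names


def recognize(gray_face):
    """Return (label, confidence) for a cropped grayscale face."""
    return recognizer.predict(gray_face)
```

The real `demo/faces.py` may use a different detector or recognizer; the sketch only shows where the `classifiers` and `images` folders fit in.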
diff --git a/contributors.txt b/contributors.txt
new file mode 100644
index 0000000..db54771
--- /dev/null
+++ b/contributors.txt
@@ -0,0 +1,2 @@
+gmarabout
+FlorianJacta
diff --git a/docs/INSTALLATION.md b/docs/INSTALLATION.md
index ff7a677..42a78b5 100644
--- a/docs/INSTALLATION.md
+++ b/docs/INSTALLATION.md
@@ -9,29 +9,46 @@ To install this demo:
 ```
 git clone git@github.com:Avaiga/demo-demo-face-recognition.git
-cd demo-face-recognition
+cd demo-face-recognition/src
 ```
 
 To install the dependencies:
 
 ```
-pipenv install
+pip install -r requirements.txt
 ```
 
-or, if you want to develop in the demo:
+## Building the Webcam component
+
+- Install Taipy 2.2 and the OpenCV packages needed to build the front-end JavaScript:
+
+```
+pip install taipy==2.2
+pip install opencv-python-headless==4.7.0.72
+pip install opencv-contrib-python-headless==4.7.0.72
+pip install pillow
+```
+
+- Install the front-end dependencies:
+
+```
+cd demo-face-recognition/webcam/webui
+npm i
+```
+
+- Find the location of taipy-gui by running the `find_taipy_gui_dir.py` script:
+
 ```
-pipenv install --dev
+python find_taipy_gui_dir.py
 ```
 
-Then you need to build the front-end JavaScript:
+- Update `webpack.config.js` with the taipy-gui path and run:
 
 ```
-cd taipy-demo-face-recognition/webcam/webui
-npm install
-npm build
+npm run build
 ```
 
-And finally, to run the demo:
+Finally, to run the demo:
 
 ```
-pipenv run python main.py
+python main.py
 ```
diff --git a/src/main.py b/src/main.py
index b91cae4..6274ac7 100644
--- a/src/main.py
+++ b/src/main.py
@@ -10,20 +10,10 @@ from pathlib import Path
 
 from demo.faces import detect_faces, recognize_face, train_face_recognizer
 
-
 logging.basicConfig(level=logging.DEBUG)
 
 training_data_folder = Path("images")
 
-show_capture_dialog = False
-capture_image = False
-show_add_captured_images_dialog = False
-
-labeled_faces = []  # Contains rect with label (for UI component)
-
-captured_image = None
-captured_label = ""
-
 
 def on_action_captured_image(state, id, payload):
     print("Captured image")
@@ -102,7 +92,17 @@ def button_retrain_clicked(state):
     notify(state, "s", "Retrained!")
 
 
-webcam_md = """<|toggle|theme|>
+if __name__ == "__main__":
+    show_capture_dialog = False
+    capture_image = False
+    show_add_captured_images_dialog = False
+
+    labeled_faces = []  # Contains rect with label (for UI component)
+
+    captured_image = None
+    captured_label = ""
+
+    webcam_md = """<|toggle|theme|>
 |>
-"""
+    """
 
-if __name__ == "__main__":
     # Create dir where the pictures will be stored
     if not training_data_folder.exists():
         training_data_folder.mkdir()
@@ -147,4 +146,5 @@ def button_retrain_clicked(state):
 
     gui = Gui(webcam_md)
     gui.add_library(Webcam())
-    gui.run(title='Face Recognition')
\ No newline at end of file
+    gui.run(title='Face Recognition')
+
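The `src/main.py` change above moves the per-user variables and the page definition under the `if __name__ == "__main__":` guard. Below is a minimal sketch of that pattern with a simplified page and variable set that are not the demo's real markup: Taipy binds the variables referenced by the page as per-client state, callbacks read and write them through `state`, and keeping `Gui(...).run(...)` under the guard follows the usual Taipy convention that importing the module never starts a server.

```python
# Minimal sketch of the state-scoping pattern used in src/main.py above.
# The page content and variable name here are illustrative assumptions.
from taipy.gui import Gui, notify


def on_capture(state):
    # Callbacks update per-client values through `state`,
    # never through the module-level names directly.
    state.show_capture_dialog = True
    notify(state, "i", "Capture requested")


if __name__ == "__main__":
    # Values assigned here become the initial state bound to the page.
    show_capture_dialog = False

    page = """
<|Capture|button|on_action=on_capture|>

Capture dialog open: <|{show_capture_dialog}|>
"""

    # Guarding the Gui setup means the module can be imported (for tests or
    # tooling) without side effects; only a direct run starts the server.
    Gui(page).run(title="Face Recognition")
```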