Migrated to Google MLKit #24

Open · wants to merge 7 commits into master
74 changes: 74 additions & 0 deletions .github/workflows/main.yml
@@ -0,0 +1,74 @@
# This is a basic workflow to help you get started with Actions

name: CI

# Controls when the workflow will run
on:
  # Triggers the workflow on push or pull request events but only for the master branch
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

  # # Allows you to run this workflow manually from the Actions tab
  # workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  version:
    name: Create version number
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
      # - name: Fetch all history for all tags and branches
      #   run: |
      #     git fetch --prune --depth=10000
      # - name: Install GitVersion
      #   uses: gittools/actions/gitversion/[email protected]
      #   with:
      #     versionSpec: '5.2.x'
      # - name: Use GitVersion
      #   id: gitversion
      #   uses: gittools/actions/gitversion/[email protected]
      # - name: Create version.txt with nuGetVersion
      #   run: echo ${{ steps.gitversion.outputs.nuGetVersion }} > version.txt
      # - name: Upload version.txt
      #   uses: actions/upload-artifact@v2
      #   with:
      #     name: gitversion
      #     path: version.txt
  build:
    name: Build APK and Create release
    needs: [ version ]
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
      - uses: actions/setup-java@v1
        with:
          java-version: '11.x'
      - uses: subosito/flutter-action@v1
        with:
          flutter-version: '2.5.3'
      # - name: Get version.txt
      #   uses: actions/download-artifact@v2
      #   with:
      #     name: gitversion
      # - name: Create new file without newline char from version.txt
      #   run: tr -d '\n' < version.txt > version1.txt
      # - name: Read version
      #   id: version
      #   uses: juliangruber/read-file-action@v1
      #   with:
      #     path: version1.txt
      - run: flutter pub get
      # - run: flutter test
      - run: flutter build apk --release --split-per-abi
      # - run: flutter build appbundle
      - name: Create a Release in GitHub
        uses: ncipollo/[email protected]
        with:
          artifacts: "build/app/outputs/apk/release/*.apk,build/app/outputs/bundle/release/app-release.aab"
          token: ${{ secrets.GH_TOKEN }}
          tag: v1.0.${{ github.run_number }}
          commit: ${{ github.sha }}
9 changes: 5 additions & 4 deletions README.md
@@ -2,15 +2,16 @@
 alt="Face recognition">
 # Face Recognition Flutter
 
-Realtime face recognition flutter app.
+Realtime face recognition Flutter app, updated to null safety; the deprecated Firebase ML Vision has been migrated to Google ML Kit.
 
+This base repo has been forked from https://github.com/Rajatkalsotra/Face-Recognition-Flutter.
+
 [Download](https://github.com/Rajatkalsotra/Face-Recognition-with-Flutter/raw/master/FaceRecognition.apk) apk file.
 
 ## Steps
 
 ### Face detection
 
-Used Firebase ML Vision to detect faces .
+Uses Google ML Kit to detect faces.
 
 ### Face Recognition
 
@@ -21,7 +22,7 @@ Convert Tensorflow implementation of [MobileFaceNet](https://github.com/sirius-a
 **Step 1:** Download or clone this repo:
 
 ```
-git clone https://github.com/Rajatkalsotra/Face-Recognition-Flutter.git
+git clone https://github.com/rjvysakh/Face-Recognition-Flutter
 ```
 
 **Step 2:** Go to project root and execute the following command in console to get the required dependencies:
25 changes: 14 additions & 11 deletions lib/detector_painters.dart
@@ -1,14 +1,17 @@
 import 'dart:ui';
-import 'package:firebase_ml_vision/firebase_ml_vision.dart';
+// import 'package:firebase_ml_vision/firebase_ml_vision.dart';
 import 'package:flutter/foundation.dart';
 import 'package:flutter/material.dart';
+import 'package:google_ml_kit/google_ml_kit.dart';
 
 class FaceDetectorPainter extends CustomPainter {
   FaceDetectorPainter(this.imageSize, this.results);
   final Size imageSize;
-  double scaleX, scaleY;
-  dynamic results;
-  Face face;
+  late double scaleX;
+
+  late double scaleY;
+  late dynamic results;
+  late Face face;
   @override
   void paint(Canvas canvas, Size size) {
     final Paint paint = Paint()
@@ -52,14 +55,14 @@ class FaceDetectorPainter extends CustomPainter {
 }
 
 RRect _scaleRect(
-    {@required Rect rect,
-    @required Size imageSize,
-    @required Size widgetSize,
-    double scaleX,
-    double scaleY}) {
+    {@required Rect? rect,
+    @required Size? imageSize,
+    @required Size? widgetSize,
+    double? scaleX,
+    double? scaleY}) {
   return RRect.fromLTRBR(
-      (widgetSize.width - rect.left.toDouble() * scaleX),
-      rect.top.toDouble() * scaleY,
+      (widgetSize!.width - rect!.left.toDouble() * scaleX!),
+      rect.top.toDouble() * scaleY!,
       widgetSize.width - rect.right.toDouble() * scaleX,
       rect.bottom.toDouble() * scaleY,
       Radius.circular(10));
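For context (not part of this diff): `_scaleRect` maps a face's bounding box from camera-image coordinates into widget coordinates, mirrored horizontally for the front camera. A minimal sketch of how `paint` presumably derives the scale factors and draws each box, based on the fields visible above:

```
// Sketch only: assumes paint() receives the widget's canvas size and
// that results is the List<Face> produced by the ML Kit detector.
scaleX = size.width / imageSize.width;
scaleY = size.height / imageSize.height;
for (Face face in results) {
  canvas.drawRRect(
    _scaleRect(
      rect: face.boundingBox,
      imageSize: imageSize,
      widgetSize: size,
      scaleX: scaleX,
      scaleY: scaleY,
    ),
    paint,
  );
}
```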
66 changes: 36 additions & 30 deletions lib/main.dart
@@ -1,8 +1,9 @@
 import 'dart:convert';
 import 'dart:io';
+import 'package:google_ml_kit/google_ml_kit.dart';
 import 'package:path_provider/path_provider.dart';
 import 'package:camera/camera.dart';
-import 'package:firebase_ml_vision/firebase_ml_vision.dart';
+// import 'package:firebase_ml_vision/firebase_ml_vision.dart';
 import 'package:flutter/material.dart';
 import 'detector_painters.dart';
 import 'utils.dart';
@@ -27,16 +28,16 @@ class _MyHomePage extends StatefulWidget {
 }
 
 class _MyHomePageState extends State<_MyHomePage> {
-  File jsonFile;
+  File? jsonFile;
   dynamic _scanResults;
-  CameraController _camera;
+  CameraController? _camera;
   var interpreter;
   bool _isDetecting = false;
   CameraLensDirection _direction = CameraLensDirection.front;
   dynamic data = {};
   double threshold = 1.0;
-  Directory tempDir;
-  List e1;
+  Directory? tempDir;
+  List? e1;
   bool _faceFound = false;
   final TextEditingController _name = new TextEditingController();
   @override
@@ -49,15 +50,18 @@ class _MyHomePageState extends State<_MyHomePage> {
   }
 
   Future loadModel() async {
+    print("load");
     try {
       final gpuDelegateV2 = tfl.GpuDelegateV2(
-          options: tfl.GpuDelegateOptionsV2(
-            false,
-            tfl.TfLiteGpuInferenceUsage.fastSingleAnswer,
-            tfl.TfLiteGpuInferencePriority.minLatency,
-            tfl.TfLiteGpuInferencePriority.auto,
-            tfl.TfLiteGpuInferencePriority.auto,
-          ));
+        options: tfl.GpuDelegateOptionsV2(),
+        // options: tfl.GpuDelegateOptionsV2(
+        //   false,
+        //   tfl.TfLiteGpuInferenceUsage.fastSingleAnswer,
+        //   tfl.TfLiteGpuInferencePriority.minLatency,
+        //   tfl.TfLiteGpuInferencePriority.auto,
+        //   tfl.TfLiteGpuInferencePriority.auto,
+        // ),
+      );
 
       var interpreterOptions = tfl.InterpreterOptions()
         ..addDelegate(gpuDelegateV2);
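For context (not part of this diff): the folded remainder of `loadModel` presumably creates the interpreter from these options. A minimal self-contained sketch, assuming the `tflite_flutter` package (imported as `tfl`, as in this file) and an illustrative asset name:

```
// Sketch under stated assumptions; the asset name is hypothetical.
import 'package:tflite_flutter/tflite_flutter.dart' as tfl;

Future<tfl.Interpreter> loadMobileFaceNet() async {
  // Default GPU delegate options replace the old positional arguments.
  final gpuDelegateV2 = tfl.GpuDelegateV2(options: tfl.GpuDelegateOptionsV2());
  final interpreterOptions = tfl.InterpreterOptions()
    ..addDelegate(gpuDelegateV2);
  return tfl.Interpreter.fromAsset('mobilefacenet.tflite',
      options: interpreterOptions);
}
```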
@@ -72,20 +76,21 @@ class _MyHomePageState extends State<_MyHomePage> {
     await loadModel();
     CameraDescription description = await getCamera(_direction);
 
-    ImageRotation rotation = rotationIntToImageRotation(
+    InputImageRotation rotation = rotationIntToImageRotation(
       description.sensorOrientation,
     );
 
     _camera =
         CameraController(description, ResolutionPreset.low, enableAudio: false);
-    await _camera.initialize();
+    await _camera!.initialize();
     await Future.delayed(Duration(milliseconds: 500));
     tempDir = await getApplicationDocumentsDirectory();
-    String _embPath = tempDir.path + '/emb.json';
+    String _embPath = tempDir!.path + '/emb.json';
     jsonFile = new File(_embPath);
-    if (jsonFile.existsSync()) data = json.decode(jsonFile.readAsStringSync());
+    if (jsonFile!.existsSync())
+      data = json.decode(jsonFile!.readAsStringSync());
 
-    _camera.startImageStream((CameraImage image) {
+    _camera!.startImageStream((CameraImage image) {
       if (_camera != null) {
         if (_isDetecting) return;
         _isDetecting = true;
@@ -123,6 +128,7 @@ class _MyHomePageState extends State<_MyHomePage> {
         },
       ).catchError(
         (_) {
+          print("error");
           _isDetecting = false;
         },
       );
@@ -131,7 +137,7 @@ class _MyHomePageState extends State<_MyHomePage> {
   }
 
   HandleDetection _getDetectionMethod() {
-    final faceDetector = FirebaseVision.instance.faceDetector(
+    final faceDetector = GoogleMlKit.vision.faceDetector(
       FaceDetectorOptions(
         mode: FaceDetectorMode.accurate,
       ),
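For context (not part of this diff): the detector returned here exposes `processImage`, which replaces ML Vision's detection call. A minimal usage sketch, assuming the file-based `InputImage` constructor for brevity:

```
// Sketch only: detect faces in a still image with google_ml_kit.
import 'package:google_ml_kit/google_ml_kit.dart';

Future<List<Face>> detectFaces(String imagePath) async {
  final faceDetector = GoogleMlKit.vision.faceDetector(
    FaceDetectorOptions(mode: FaceDetectorMode.accurate),
  );
  try {
    final inputImage = InputImage.fromFilePath(imagePath);
    return await faceDetector.processImage(inputImage);
  } finally {
    await faceDetector.close(); // detectors hold native resources
  }
}
```

In the live app the `InputImage` is instead built from the camera stream's bytes before being handed to the detection method above.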
@@ -143,14 +149,14 @@ class _MyHomePageState extends State<_MyHomePage> {
     const Text noResultsText = const Text('');
     if (_scanResults == null ||
         _camera == null ||
-        !_camera.value.isInitialized) {
+        !_camera!.value.isInitialized) {
       return noResultsText;
     }
     CustomPainter painter;
 
     final Size imageSize = Size(
-      _camera.value.previewSize.height,
-      _camera.value.previewSize.width,
+      _camera!.value.previewSize!.height,
+      _camera!.value.previewSize!.width,
     );
     painter = FaceDetectorPainter(imageSize, _scanResults);
     return CustomPaint(
@@ -159,7 +165,7 @@ class _MyHomePageState extends State<_MyHomePage> {
   }
 
   Widget _buildImage() {
-    if (_camera == null || !_camera.value.isInitialized) {
+    if (_camera == null || !_camera!.value.isInitialized) {
       return Center(
         child: CircularProgressIndicator(),
       );
@@ -172,7 +178,7 @@ class _MyHomePageState extends State<_MyHomePage> {
         : Stack(
             fit: StackFit.expand,
             children: <Widget>[
-              CameraPreview(_camera),
+              CameraPreview(_camera!),
              _buildResults(),
            ],
          ),
@@ -185,8 +191,8 @@ class _MyHomePageState extends State<_MyHomePage> {
     } else {
       _direction = CameraLensDirection.back;
     }
-    await _camera.stopImageStream();
-    await _camera.dispose();
+    await _camera!.stopImageStream();
+    await _camera!.dispose();
 
     setState(() {
       _camera = null;
@@ -254,7 +260,7 @@ class _MyHomePageState extends State<_MyHomePage> {
     var img = imglib.Image(width, height); // Create Image buffer
     const int hexFF = 0xFF000000;
     final int uvyButtonStride = image.planes[1].bytesPerRow;
-    final int uvPixelStride = image.planes[1].bytesPerPixel;
+    final int uvPixelStride = image.planes[1].bytesPerPixel!;
     for (int x = 0; x < width; x++) {
       for (int y = 0; y < height; y++) {
         final int uvIndex =
@@ -283,11 +289,11 @@ class _MyHomePageState extends State<_MyHomePage> {
   String _recog(imglib.Image img) {
     List input = imageToByteListFloat32(img, 112, 128, 128);
     input = input.reshape([1, 112, 112, 3]);
-    List output = List(1 * 192).reshape([1, 192]);
+    List output = List.filled(1 * 192, null, growable: false).reshape([1, 192]);
     interpreter.run(input, output);
     output = output.reshape([192]);
     e1 = List.from(output);
-    return compare(e1).toUpperCase();
+    return compare(e1!).toUpperCase();
   }
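For context (not part of this diff): `compare` (folded just below) presumably measures the Euclidean distance between the fresh 192-float embedding and each stored one, returning the closest name. A hedged sketch of that distance:

```
import 'dart:math';

// Sketch only: assumed core of the folded compare() logic.
double euclideanDistance(List<double> a, List<double> b) {
  var sum = 0.0;
  for (var i = 0; i < a.length; i++) {
    final d = a[i] - b[i];
    sum += d * d;
  }
  return sqrt(sum);
}
```

A match would then be the stored name whose distance to the current embedding is smallest and below `threshold` (1.0 in this state class).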

   String compare(List currEmb) {
@@ -308,7 +314,7 @@

   void _resetFile() {
     data = {};
-    jsonFile.deleteSync();
+    jsonFile!.deleteSync();
   }
 
   void _viewLabels() {
@@ -403,7 +409,7 @@ class _MyHomePageState extends State<_MyHomePage> {
 
   void _handle(String text) {
     data[text] = e1;
-    jsonFile.writeAsStringSync(json.encode(data));
+    jsonFile!.writeAsStringSync(json.encode(data));
     _initializeCamera();
   }
 }
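For context (not part of this diff): `_handle` persists `data` as `emb.json`, mapping each entered name to its 192-float embedding. A sketch of reading that file back, under that assumed structure:

```
import 'dart:convert';
import 'dart:io';

// Sketch only: assumes emb.json is {"name": [192 doubles], ...}
// as written by _handle via json.encode(data).
Map<String, List<double>> loadEmbeddings(File jsonFile) {
  final raw = json.decode(jsonFile.readAsStringSync()) as Map<String, dynamic>;
  return raw.map((name, emb) => MapEntry(
      name, (emb as List).map((v) => (v as num).toDouble()).toList()));
}
```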