MLKit face detection demo explanation code snippet
// Gradle dependencies (app-level build.gradle)
implementation 'com.google.firebase:firebase-ml-vision:18.0.1'
implementation 'com.google.firebase:firebase-ml-vision-face-model:17.0.2'

// AndroidManifest.xml: have the face model downloaded automatically after install
<meta-data
    android:name="com.google.firebase.ml.vision.DEPENDENCIES"
    android:value="face" />

// To initialise the detector
FirebaseVisionFaceDetectorOptions options =
        new FirebaseVisionFaceDetectorOptions.Builder()
                .setPerformanceMode(FirebaseVisionFaceDetectorOptions.ACCURATE)
                .setClassificationMode(FirebaseVisionFaceDetectorOptions.ALL_CLASSIFICATIONS)
                .enableTracking()
                .build();

FirebaseVisionFaceDetector detector = FirebaseVision.getInstance()
        .getVisionFaceDetector(options);

// To create the FirebaseVisionImage
FirebaseVisionImageMetadata metadata =
        new FirebaseVisionImageMetadata.Builder()
                .setFormat(FirebaseVisionImageMetadata.IMAGE_FORMAT_NV21)
                .setWidth(frameMetadata.getWidth())
                .setHeight(frameMetadata.getHeight())
                .setRotation(frameMetadata.getRotation())
                .build();

Bitmap bitmap = BitmapUtils.getBitmap(data, frameMetadata);
FirebaseVisionImage firebaseVisionImage = FirebaseVisionImage.fromByteBuffer(data, metadata);
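// Alternatively, the Bitmap built above by the BitmapUtils helper can be wrapped directly,
// skipping the NV21 metadata; a minimal sketch (FirebaseVisionImage.fromBitmap assumes the
// bitmap is already rotated upright):
FirebaseVisionImage imageFromBitmap = FirebaseVisionImage.fromBitmap(bitmap);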
// To connect the camera resource with the detector
mCameraSource = new CameraSource(this, barcodeOverlay);
mCameraSource.setFacing(CameraSource.CAMERA_FACING_FRONT);

// To draw only the bounding rect around the face in the canvas
faceDetectionProcessor = new FaceDetectionProcessor(detector);

/* To plot the landmarks of the face in the canvas.
   For this, set the ContourMode attribute to ALL_CONTOURS by using
   setContourMode(FirebaseVisionFaceDetectorOptions.ALL_CONTOURS) */
faceDetectionProcessor = new FaceContourDetectorProcessor(detector);

// Callback interface to receive the result
faceDetectionProcessor.setFaceDetectionResultListener(getFaceDetectionListener());
mCameraSource.setMachineLearningFrameProcessor(faceDetectionProcessor);

// To detect the faces from the image
@Override
protected Task<List<FirebaseVisionFace>> detectInImage(FirebaseVisionImage image) {
    return detector.detectInImage(image);
}
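// A minimal sketch of consuming the Task returned above directly, without the quickstart-style
// processor classes; addOnSuccessListener/addOnFailureListener are the standard Play services
// Task callbacks, and TAG/firebaseVisionImage are the names used earlier in this snippet:
detector.detectInImage(firebaseVisionImage)
        .addOnSuccessListener(faces -> {
            for (FirebaseVisionFace face : faces) {
                Log.d(TAG, "Detected face bounds : " + face.getBoundingBox());
            }
        })
        .addOnFailureListener(e -> Log.e(TAG, "Face detection failed", e));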
// To decode the face detection results
FaceDetectionResultListener faceDetectionResultListener = new FaceDetectionResultListener() {
    @Override
    public void onSuccess(@Nullable Bitmap originalCameraImage, @NonNull List<FirebaseVisionFace> faces,
                          @NonNull FrameMetadata frameMetadata, @NonNull GraphicOverlay graphicOverlay) {
        boolean isEnable = faces.size() > 0;
        for (FirebaseVisionFace face : faces) {
            // To get the results
            Log.d(TAG, "Face bounds : " + face.getBoundingBox());

            // To get these, the ClassificationMode attribute has to be set to ALL_CLASSIFICATIONS
            Log.d(TAG, "Left eye open probability : " + face.getLeftEyeOpenProbability());
            Log.d(TAG, "Right eye open probability : " + face.getRightEyeOpenProbability());
            Log.d(TAG, "Smiling probability : " + face.getSmilingProbability());

            // To get this, tracking has to be enabled via enableTracking()
            Log.d(TAG, "Face ID : " + face.getTrackingId());
        }
    }

    @Override
    public void onFailure(@NonNull Exception e) {
    }
};
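/* FaceDetectionResultListener is part of the demo project rather than the Firebase SDK; a
   minimal sketch of what the interface might look like, inferred from the callbacks
   implemented above: */
public interface FaceDetectionResultListener {
    void onSuccess(@Nullable Bitmap originalCameraImage, @NonNull List<FirebaseVisionFace> faces,
                   @NonNull FrameMetadata frameMetadata, @NonNull GraphicOverlay graphicOverlay);

    void onFailure(@NonNull Exception e);
}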
// To connect the camera resource with the detector
mCameraSource = new CameraSource(this, barcodeOverlay);
mCameraSource.setFacing(CameraSource.CAMERA_FACING_FRONT);

// Initialise the processor, attach the result listener, and set it on the camera source.

// To draw only the bounding rect around the face in the canvas
faceDetectionProcessor = new FaceDetectionProcessor(detector);
faceDetectionProcessor.setFaceDetectionResultListener(getFaceDetectionListener());
mCameraSource.setMachineLearningFrameProcessor(faceDetectionProcessor);

/* To plot the landmarks of the face in the canvas.
   For this, set the ContourMode attribute to ALL_CONTOURS by using
   setContourMode(FirebaseVisionFaceDetectorOptions.ALL_CONTOURS) */
faceDetectionProcessor = new FaceContourDetectorProcessor(detector);
faceDetectionProcessor.setFaceDetectionResultListener(getFaceDetectionListener());
mCameraSource.setMachineLearningFrameProcessor(faceDetectionProcessor);
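/* After the processor is set, the camera has to be started for frames to reach the detector.
   A minimal sketch, assuming a quickstart-style CameraSourcePreview view (named `preview` here)
   and the GraphicOverlay (`barcodeOverlay`) from the layout: */
try {
    preview.start(mCameraSource, barcodeOverlay);
} catch (IOException e) {
    Log.e(TAG, "Unable to start camera source.", e);
    mCameraSource.release();
    mCameraSource = null;
}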