diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..fd45b12 --- /dev/null +++ b/.gitignore @@ -0,0 +1,11 @@ +*.iml +.gradle +/local.properties +/.idea/caches/build_file_checksums.ser +/.idea/libraries +/.idea/modules.xml +/.idea/workspace.xml +.DS_Store +/build +/captures +.externalNativeBuild diff --git a/.idea/codeStyles/Project.xml b/.idea/codeStyles/Project.xml new file mode 100644 index 0000000..30aa626 --- /dev/null +++ b/.idea/codeStyles/Project.xml @@ -0,0 +1,29 @@ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/gradle.xml b/.idea/gradle.xml new file mode 100644 index 0000000..3ce167d --- /dev/null +++ b/.idea/gradle.xml @@ -0,0 +1,19 @@ + + + + + + \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml new file mode 100644 index 0000000..e0d5b93 --- /dev/null +++ b/.idea/misc.xml @@ -0,0 +1,38 @@ + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/runConfigurations.xml b/.idea/runConfigurations.xml new file mode 100644 index 0000000..7f68460 --- /dev/null +++ b/.idea/runConfigurations.xml @@ -0,0 +1,12 @@ + + + + + + \ No newline at end of file diff --git a/app/.gitignore b/app/.gitignore new file mode 100644 index 0000000..796b96d --- /dev/null +++ b/app/.gitignore @@ -0,0 +1 @@ +/build diff --git a/app/build.gradle b/app/build.gradle new file mode 100644 index 0000000..ad06c75 --- /dev/null +++ b/app/build.gradle @@ -0,0 +1,41 @@ +apply plugin: 'com.android.application' + +android { + compileSdkVersion 28 + defaultConfig { + applicationId "com.themon.test.mlkit_faciallandmarks" + minSdkVersion 21 + targetSdkVersion 28 + versionCode 1 + versionName "1.0" + testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner" + + + } + buildTypes { + release { + minifyEnabled false + proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro' + } + } +} + +dependencies { + ///implementation fileTree(include: ['*.jar'], dir: 'libs') + 
implementation 'com.android.support:appcompat-v7:22.2.0' + //implementation 'com.android.support.constraint:constraint-layout:1.1.3' + + //MLKit + implementation 'com.google.firebase:firebase-core:16.0.6' + implementation 'com.google.firebase:firebase-ml-vision:18.0.2' + //implementation 'com.google.firebase:firebase-ml-vision-image-label-model:17.0.2' + implementation 'com.google.firebase:firebase-ml-vision-face-model:17.0.2' + //implementation 'com.google.firebase:firebase-ml-model-interpreter:16.2.3' + + testImplementation 'junit:junit:4.12' + androidTestImplementation 'com.android.support.test:runner:1.0.2' + androidTestImplementation 'com.android.support.test.espresso:espresso-core:3.0.2' + +} + +apply plugin: 'com.google.gms.google-services' \ No newline at end of file diff --git a/app/google-services.json b/app/google-services.json new file mode 100644 index 0000000..78d0a43 --- /dev/null +++ b/app/google-services.json @@ -0,0 +1,42 @@ +{ + "project_info": { + "project_number": "409106625925", + "firebase_url": "https://facial-landmarks-52ffa.firebaseio.com", + "project_id": "facial-landmarks-52ffa", + "storage_bucket": "facial-landmarks-52ffa.appspot.com" + }, + "client": [ + { + "client_info": { + "mobilesdk_app_id": "1:409106625925:android:6f234121d2935301", + "android_client_info": { + "package_name": "com.themon.test.mlkit_faciallandmarks" + } + }, + "oauth_client": [ + { + "client_id": "409106625925-f2haj4870hsqrgu7ml815v75er585eit.apps.googleusercontent.com", + "client_type": 3 + } + ], + "api_key": [ + { + "current_key": "AIzaSyBCDccrUHX5wD2nbWpOy2ZqXVH8dmGoYZ4" + } + ], + "services": { + "analytics_service": { + "status": 1 + }, + "appinvite_service": { + "status": 1, + "other_platform_oauth_client": [] + }, + "ads_service": { + "status": 2 + } + } + } + ], + "configuration_version": "1" +} \ No newline at end of file diff --git a/app/proguard-rules.pro b/app/proguard-rules.pro new file mode 100644 index 0000000..f1b4245 --- /dev/null +++ 
b/app/proguard-rules.pro @@ -0,0 +1,21 @@ +# Add project specific ProGuard rules here. +# You can control the set of applied configuration files using the +# proguardFiles setting in build.gradle. +# +# For more details, see +# http://developer.android.com/guide/developing/tools/proguard.html + +# If your project uses WebView with JS, uncomment the following +# and specify the fully qualified class name to the JavaScript interface +# class: +#-keepclassmembers class fqcn.of.javascript.interface.for.webview { +# public *; +#} + +# Uncomment this to preserve the line number information for +# debugging stack traces. +#-keepattributes SourceFile,LineNumberTable + +# If you keep the line number information, uncomment this to +# hide the original source file name. +#-renamesourcefileattribute SourceFile diff --git a/app/src/androidTest/java/com/themon/test/mlkit_faciallandmarks/ExampleInstrumentedTest.java b/app/src/androidTest/java/com/themon/test/mlkit_faciallandmarks/ExampleInstrumentedTest.java new file mode 100644 index 0000000..757c317 --- /dev/null +++ b/app/src/androidTest/java/com/themon/test/mlkit_faciallandmarks/ExampleInstrumentedTest.java @@ -0,0 +1,26 @@ +package com.themon.test.mlkit_faciallandmarks; + +import android.content.Context; +import android.support.test.InstrumentationRegistry; +import android.support.test.runner.AndroidJUnit4; + +import org.junit.Test; +import org.junit.runner.RunWith; + +import static org.junit.Assert.*; + +/** + * Instrumented test, which will execute on an Android device. + * + * @see Testing documentation + */ +@RunWith(AndroidJUnit4.class) +public class ExampleInstrumentedTest { + @Test + public void useAppContext() { + // Context of the app under test. 
+ Context appContext = InstrumentationRegistry.getTargetContext(); + + assertEquals("com.themon.test.mlkit_faciallandmarks", appContext.getPackageName()); + } +} diff --git a/app/src/main/AndroidManifest.xml b/app/src/main/AndroidManifest.xml new file mode 100644 index 0000000..2f606da --- /dev/null +++ b/app/src/main/AndroidManifest.xml @@ -0,0 +1,31 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/app/src/main/java/com/themon/test/mlkit_faciallandmarks/MainActivity.java b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/MainActivity.java new file mode 100644 index 0000000..4024834 --- /dev/null +++ b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/MainActivity.java @@ -0,0 +1,537 @@ +package com.themon.test.mlkit_faciallandmarks; + +import android.app.ProgressDialog; +import android.content.Context; +import android.content.DialogInterface; +import android.content.pm.PackageInfo; +import android.content.pm.PackageManager; +import android.content.res.Configuration; +import android.graphics.Bitmap; +import android.graphics.Canvas; +import android.graphics.Matrix; +import android.graphics.Point; +import android.graphics.Rect; +import android.hardware.Camera; +import android.os.AsyncTask; +import android.os.Build; +import android.os.Bundle; +import android.os.Environment; +import android.os.Handler; +import android.os.HandlerThread; +import android.support.v4.app.ActivityCompat; +import android.support.v4.content.ContextCompat; +import android.support.v7.app.AlertDialog; +import android.support.v7.app.AppCompatActivity; +import android.text.InputType; +import android.util.Log; +import android.view.Display; +import android.view.View; +import android.view.WindowManager; +import android.widget.AdapterView; +import android.widget.ArrayAdapter; +import android.widget.Button; +import android.widget.CompoundButton; +import android.widget.EditText; +import android.widget.Spinner; +import android.widget.TextView; +import 
android.widget.ToggleButton; + +import com.google.firebase.ml.vision.face.FirebaseVisionFace; +import com.themon.test.mlkit_faciallandmarks.common.BitmapUtils; +import com.themon.test.mlkit_faciallandmarks.common.CameraSource; +import com.themon.test.mlkit_faciallandmarks.common.CameraSourcePreview; +import com.themon.test.mlkit_faciallandmarks.common.FrameMetadata; +import com.themon.test.mlkit_faciallandmarks.common.GraphicOverlay; +import com.themon.test.mlkit_faciallandmarks.facedetection.FaceContourDetectorProcessor; +import com.themon.test.mlkit_faciallandmarks.facedetection.FaceDetectionProcessor; +import com.tzutalin.dlib.Constants; +import com.tzutalin.dlib.FaceRec; +import com.tzutalin.dlib.FileUtils; +import com.tzutalin.dlib.VisionDetRet; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Pattern; + +public class MainActivity extends AppCompatActivity + implements ActivityCompat.OnRequestPermissionsResultCallback, + AdapterView.OnItemSelectedListener, + CompoundButton.OnCheckedChangeListener { + private static final String FACE_DETECTION = "Face Detection"; + private static final String TEXT_DETECTION = "Text Detection"; + private static final String BARCODE_DETECTION = "Barcode Detection"; + private static final String IMAGE_LABEL_DETECTION = "Label Detection"; + private static final String CLASSIFICATION_QUANT = "Classification (quantized)"; + private static final String CLASSIFICATION_FLOAT = "Classification (float)"; + private static final String FACE_CONTOUR = "Face Contour"; + private static final String TAG = "LivePreviewActivity"; + private static final int PERMISSION_REQUESTS = 1; + + private String name; + private CameraSource cameraSource = null; + private CameraSourcePreview preview; + private GraphicOverlay graphicOverlay; + private String selectedModel = FACE_CONTOUR; + //private PersonRecognizer 
mFaceRecognzier = null; + + private FaceRec mFaceRec; + + private Handler mBackgroundHandler; + + @Override + protected void onCreate(Bundle savedInstanceState) { + super.onCreate(savedInstanceState); + Log.d(TAG, "onCreate"); + + setContentView(R.layout.activity_main); + + preview = (CameraSourcePreview) findViewById(R.id.firePreview); + if (preview == null) { + Log.d(TAG, "Preview is null"); + } + graphicOverlay = (GraphicOverlay) findViewById(R.id.fireFaceOverlay); + if (graphicOverlay == null) { + Log.d(TAG, "graphicOverlay is null"); + } + + Spinner spinner = (Spinner) findViewById(R.id.spinner); + List options = new ArrayList<>(); + options.add(FACE_CONTOUR); + options.add(FACE_DETECTION); + // Creating adapter for spinner + ArrayAdapter dataAdapter = new ArrayAdapter(this, R.layout.spinner_style, options); + // Drop down layout style - list view with radio button + dataAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item); + // attaching data adapter to spinner + spinner.setAdapter(dataAdapter); + spinner.setOnItemSelectedListener(this); + + ToggleButton facingSwitch = (ToggleButton) findViewById(R.id.facingSwitch); + facingSwitch.setChecked(true); + facingSwitch.setOnCheckedChangeListener(this); + // Hide the toggle button if there is only 1 camera + if (Camera.getNumberOfCameras() == 1) { + facingSwitch.setVisibility(View.GONE); + } + + if (allPermissionsGranted()) { + createCameraSource(selectedModel); + } else { + getRuntimePermissions(); + } + + //Initiliaze face recognizer + new initRecAsync(null).execute(); + Button addButton = (Button) findViewById(R.id.add); + Button recogButton = (Button) findViewById(R.id.recog); + + addButton.setOnClickListener(new View.OnClickListener() { + + @Override + public void onClick(View view) { + showAlertDialog(); + } + }); + recogButton.setOnClickListener(new View.OnClickListener() { + @Override + public void onClick(View view) { + MainActivity.this.onPause(); + //Get current camera frame + 
Log.d(TAG, "Getting camera frmae"); + List faces = cameraSource.getMachineLearningFrameProcessor().getFaces(); + ByteBuffer latestImage = cameraSource.getMachineLearningFrameProcessor().getLatestImage(); + FrameMetadata latestFrameMetadata = cameraSource.getMachineLearningFrameProcessor().getLatestImageMetadata(); + + if (!faces.isEmpty() && latestImage != null) { + Bitmap bitmap = BitmapUtils.getBitmap(latestImage, latestFrameMetadata); + Rect bbox = faces.get(0).getBoundingBox(); + Log.d(TAG, bbox.left + " " + bbox.top + " " + bbox.width() + " " + bbox.height()); + Bitmap crop = Bitmap.createBitmap(bitmap, bbox.left, bbox.top, + Math.min(bbox.width(), bitmap.getWidth()), Math.min(bbox.height(), bitmap.getHeight())); + + new recognizeAsync(crop).execute(); + } else { + TextView textView = (TextView) findViewById(R.id.text); + textView.setText("No person found!"); + MainActivity.this.onResume(); + } + } + }); + } + + private void showAlertDialog() { + AlertDialog.Builder builder = new AlertDialog.Builder(this); + builder.setTitle("Enter name"); + + final EditText input = new EditText(this); + input.setInputType(InputType.TYPE_CLASS_TEXT);// | InputType.TYPE_TEXT_VARIATION_PASSWORD); + builder.setView(input); + + //Get current camera frame + final List faces = cameraSource.getMachineLearningFrameProcessor().getFaces(); + ByteBuffer latestImage = cameraSource.getMachineLearningFrameProcessor().getLatestImage(); + FrameMetadata latestFrameMetadata = cameraSource.getMachineLearningFrameProcessor().getLatestImageMetadata(); + final Bitmap bitmap = BitmapUtils.getBitmap(latestImage, latestFrameMetadata); + + builder.setPositiveButton("OK", new DialogInterface.OnClickListener() { + @Override + public void onClick(DialogInterface dialog, int which) { + if (faces.isEmpty()) { + dialog.cancel(); + } else { + Rect bbox = faces.get(0).getBoundingBox(); + Bitmap crop = Bitmap.createBitmap(bitmap, bbox.left, bbox.top, bbox.width(), bbox.height()); + + 
MainActivity.this.onPause(); + name = input.getText().toString(); + System.out.println("INPUT: " + name); + + new initRecAsync(crop).execute(); + } + } + }); + builder.setNegativeButton("Cancel", new DialogInterface.OnClickListener() { + @Override + public void onClick(DialogInterface dialog, int which) { + dialog.cancel(); + System.out.println("INPUT: canceld"); + } + }); + + builder.show(); + } + + private class initRecAsync extends AsyncTask { + ProgressDialog dialog = new ProgressDialog(MainActivity.this); + Bitmap bitmap; + boolean write; + + public initRecAsync(Bitmap bitmap) { + write = (bitmap != null); + this.bitmap = bitmap; + } + + @Override + protected void onPreExecute() { + Log.d(TAG, "initRecAsync onPreExecute called"); + dialog.setMessage("Initializing..."); + dialog.setCancelable(false); + dialog.show(); + super.onPreExecute(); + } + + protected Void doInBackground(Void... args) { + // create dlib_rec_example directory in sd card and copy model files + Log.d(TAG, "Doing in background"); + mFaceRec = new FaceRec(Constants.getDLibDirectoryPath()); + File folder = new File(Constants.getDLibDirectoryPath()); + boolean success = false; + if (!folder.exists()) { + success = folder.mkdirs(); + } + Log.d(TAG, "DLIB directory exists"); + + if(write) { + if (success) { + File image_folder = new File(Constants.getDLibImageDirectoryPath()); + image_folder.mkdirs(); + if (!new File(Constants.getFaceShapeModelPath()).exists()) { + FileUtils.copyFileFromRawToOthers(MainActivity.this, R.raw.shape_predictor_5_face_landmarks, Constants.getFaceShapeModelPath()); + } + if (!new File(Constants.getFaceDescriptorModelPath()).exists()) { + FileUtils.copyFileFromRawToOthers(MainActivity.this, R.raw.dlib_face_recognition_resnet_model_v1, Constants.getFaceDescriptorModelPath()); + } + } else { + Log.d(TAG, "error in setting dlib_rec_example directory"); + } + Log.d(TAG, "Creating Face Recog" + Constants.getDLibDirectoryPath()); + + // Add Person + Log.d(TAG, "Writing 
image"); + String targetPath = Constants.getDLibImageDirectoryPath() + "/" + name + ".jpg"; + FileOutputStream out = null; + try { + out = new FileOutputStream(targetPath); + bitmap.compress(Bitmap.CompressFormat.PNG, 100, out); // bmp is your Bitmap instance + // PNG is a lossless format, the compression factor (100) is ignored + } catch (Exception e) { + e.printStackTrace(); + } finally { + try { + if (out != null) { + out.close(); + } + } catch (IOException e) { + e.printStackTrace(); + } + } + } + + mFaceRec.train(); + return null; + } + + protected void onPostExecute(Void result) { + if(dialog != null && dialog.isShowing()){ + dialog.dismiss(); + } + MainActivity.this.onResume(); + } + } + + private class recognizeAsync extends AsyncTask> { + + ProgressDialog dialog = new ProgressDialog(MainActivity.this); + private boolean mIsComputing = false; + Bitmap bitmap; + + public recognizeAsync(Bitmap bitmap) { + this.bitmap = bitmap; + } + + @Override + protected void onPreExecute() { + dialog.setMessage("Recognizing..."); + dialog.setCancelable(false); + dialog.show(); + super.onPreExecute(); + } + + @Override + protected ArrayList doInBackground(Void... 
voids) { + ArrayList names = new ArrayList<>(); + + try { + long startTime = System.currentTimeMillis(); + //mFaceRec = new FaceRec(Constants.getDLibDirectoryPath()); + List results = MainActivity.this.mFaceRec.recognize(bitmap); + + for (VisionDetRet n : results) { + names.add(n.getLabel()); + } + + long endTime = System.currentTimeMillis(); + Log.d(TAG, "Time cost: " + String.valueOf((endTime - startTime) / 1000f) + " sec"); + } catch (NullPointerException ex){ + Log.d(TAG, "No bitmap!"); + throw ex; + } + + return names; + } + + protected void onPostExecute(ArrayList names) { + if(dialog != null && dialog.isShowing()){ + dialog.dismiss(); + AlertDialog.Builder builder1 = new AlertDialog.Builder(MainActivity.this); + String result = getResultMessage(names); + builder1.setMessage(result); + builder1.setCancelable(true); + AlertDialog alert11 = builder1.create(); + alert11.show(); + + //Set Text + TextView textView = (TextView) findViewById(R.id.text); + textView.setText(result); + MainActivity.this.onResume(); + } + } + } + + private String getResultMessage(ArrayList names) { + String msg = new String(); + if (names.isEmpty()) { + msg = "No face detected or Unknown person"; + + } else { + for(int i=0; i parent, View view, int pos, long id) { + // An item was selected. You can retrieve the selected item using + // parent.getItemAtPosition(pos) + selectedModel = parent.getItemAtPosition(pos).toString(); + Log.d(TAG, "Selected model: " + selectedModel); + preview.stop(); + if (allPermissionsGranted()) { + createCameraSource(selectedModel); + cameraSource.setFacing(CameraSource.CAMERA_FACING_FRONT); + startCameraSource(); + } else { + getRuntimePermissions(); + } + } + + @Override + public void onNothingSelected(AdapterView parent) { + // Do nothing. 
+ } + + @Override + public void onCheckedChanged(CompoundButton buttonView, boolean isChecked) { + Log.d(TAG, "Set facing"); + if (cameraSource != null) { + if (isChecked) { + cameraSource.setFacing(CameraSource.CAMERA_FACING_FRONT); + } else { + cameraSource.setFacing(CameraSource.CAMERA_FACING_BACK); + } + } + preview.stop(); + startCameraSource(); + } + + private void createCameraSource(String model) { + // If there's no existing cameraSource, create one. + if (cameraSource == null) { + cameraSource = new CameraSource(this, graphicOverlay); + } + + switch (model) { + case FACE_DETECTION: + Log.i(TAG, "Using Face Detector Processor"); + cameraSource.setMachineLearningFrameProcessor(new FaceDetectionProcessor()); + break; + case FACE_CONTOUR: + Log.i(TAG, "Using Face Contour Detector Processor"); + cameraSource.setMachineLearningFrameProcessor(new FaceContourDetectorProcessor()); + //cameraSource.setMachineLearningFrameProcessor(new FaceContourDetectorProcessor(mFaceRecognzier)); + break; + default: + break; + } + } + + /** + * Starts or restarts the camera source, if it exists. If the camera source doesn't exist yet + * (e.g., because onResume was called before the camera source was created), this will be called + * again when the camera source is created. + */ + private void startCameraSource() { + if (cameraSource != null) { + try { + if (preview == null) { + Log.d(TAG, "resume: Preview is null"); + } + if (graphicOverlay == null) { + Log.d(TAG, "resume: graphOverlay is null"); + } + preview.start(cameraSource, graphicOverlay); + + } catch (IOException e) { + Log.e(TAG, "Unable to start camera source.", e); + cameraSource.release(); + cameraSource = null; + } + } + } + + @Override + public void onResume() { + super.onResume(); + Log.d(TAG, "onResume"); + startCameraSource(); + } + + /** Stops the camera. 
*/ + @Override + protected void onPause() { + super.onPause(); + preview.stop(); + } + + @Override + public void onDestroy() { + super.onDestroy(); + if (cameraSource != null) { + cameraSource.release(); + } + if (mBackgroundHandler != null) { + if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2) { + mBackgroundHandler.getLooper().quitSafely(); + } else { + mBackgroundHandler.getLooper().quit(); + } + mBackgroundHandler = null; + } + } + + private Handler getBackgroundHandler() { + if (mBackgroundHandler == null) { + HandlerThread thread = new HandlerThread("background"); + thread.start(); + mBackgroundHandler = new Handler(thread.getLooper()); + } + return mBackgroundHandler; + } + + private String[] getRequiredPermissions() { + try { + PackageInfo info = + this.getPackageManager() + .getPackageInfo(this.getPackageName(), PackageManager.GET_PERMISSIONS); + String[] ps = info.requestedPermissions; + if (ps != null && ps.length > 0) { + return ps; + } else { + return new String[0]; + } + } catch (Exception e) { + return new String[0]; + } + } + + private boolean allPermissionsGranted() { + for (String permission : getRequiredPermissions()) { + if (!isPermissionGranted(this, permission)) { + return false; + } + } + return true; + } + + private void getRuntimePermissions() { + List allNeededPermissions = new ArrayList<>(); + for (String permission : getRequiredPermissions()) { + if (!isPermissionGranted(this, permission)) { + allNeededPermissions.add(permission); + } + } + + if (!allNeededPermissions.isEmpty()) { + ActivityCompat.requestPermissions( + this, allNeededPermissions.toArray(new String[0]), PERMISSION_REQUESTS); + } + } + + @Override + public void onRequestPermissionsResult( + int requestCode, String[] permissions, int[] grantResults) { + Log.i(TAG, "Permission granted!"); + if (allPermissionsGranted()) { + createCameraSource(selectedModel); + } + super.onRequestPermissionsResult(requestCode, permissions, grantResults); + } + + private static 
boolean isPermissionGranted(Context context, String permission) { + if (ContextCompat.checkSelfPermission(context, permission) + == PackageManager.PERMISSION_GRANTED) { + Log.i(TAG, "Permission granted: " + permission); + return true; + } + Log.i(TAG, "Permission NOT granted: " + permission); + return false; + } +} diff --git a/app/src/main/java/com/themon/test/mlkit_faciallandmarks/MainActivity2.java b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/MainActivity2.java new file mode 100644 index 0000000..5751e2d --- /dev/null +++ b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/MainActivity2.java @@ -0,0 +1,299 @@ +package com.themon.test.mlkit_faciallandmarks; + +import android.content.Context; +import android.content.DialogInterface; +import android.content.pm.PackageInfo; +import android.content.pm.PackageManager; +import android.hardware.Camera; +import android.os.Bundle; +import android.support.v4.app.ActivityCompat; +import android.support.v4.content.ContextCompat; +import android.support.v7.app.AlertDialog; +import android.support.v7.app.AppCompatActivity; +import android.text.InputType; +import android.util.Log; +import android.view.View; +import android.widget.AdapterView; +import android.widget.ArrayAdapter; +import android.widget.Button; +import android.widget.CompoundButton; +import android.widget.EditText; +import android.widget.Spinner; +import android.widget.TextView; +import android.widget.ToggleButton; + +import com.themon.test.mlkit_faciallandmarks.common.CameraSource; +import com.themon.test.mlkit_faciallandmarks.common.CameraSourcePreview; +import com.themon.test.mlkit_faciallandmarks.common.GraphicOverlay; +import com.themon.test.mlkit_faciallandmarks.facedetection.FaceDetectionProcessor; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class MainActivity2 extends AppCompatActivity + implements ActivityCompat.OnRequestPermissionsResultCallback, + AdapterView.OnItemSelectedListener, + 
CompoundButton.OnCheckedChangeListener { + private static final String FACE_DETECTION = "Face Detection"; + private static final String TEXT_DETECTION = "Text Detection"; + private static final String BARCODE_DETECTION = "Barcode Detection"; + private static final String IMAGE_LABEL_DETECTION = "Label Detection"; + private static final String CLASSIFICATION_QUANT = "Classification (quantized)"; + private static final String CLASSIFICATION_FLOAT = "Classification (float)"; + private static final String FACE_CONTOUR = "Face Contour"; + private static final String TAG = "LivePreviewActivity"; + private static final int PERMISSION_REQUESTS = 1; + + private String name; + private CameraSource cameraSource = null; + private CameraSourcePreview preview; + private GraphicOverlay graphicOverlay; + private String selectedModel = FACE_CONTOUR; + + @Override + protected void onCreate(Bundle savedInstanceState) { + super.onCreate(savedInstanceState); + Log.d(TAG, "onCreate"); + + setContentView(R.layout.activity_main); + + preview = (CameraSourcePreview) findViewById(R.id.firePreview); + if (preview == null) { + Log.d(TAG, "Preview is null"); + } + graphicOverlay = (GraphicOverlay) findViewById(R.id.fireFaceOverlay); + if (graphicOverlay == null) { + Log.d(TAG, "graphicOverlay is null"); + } + + Spinner spinner = (Spinner) findViewById(R.id.spinner); + List options = new ArrayList<>(); + options.add(FACE_CONTOUR); + options.add(FACE_DETECTION); + // Creating adapter for spinner + ArrayAdapter dataAdapter = new ArrayAdapter(this, R.layout.spinner_style, options); + // Drop down layout style - list view with radio button + dataAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item); + // attaching data adapter to spinner + spinner.setAdapter(dataAdapter); + spinner.setOnItemSelectedListener(this); + + ToggleButton facingSwitch = (ToggleButton) findViewById(R.id.facingSwitch); + facingSwitch.setChecked(true); + facingSwitch.setOnCheckedChangeListener(this); 
+ // Hide the toggle button if there is only 1 camera + if (Camera.getNumberOfCameras() == 1) { + facingSwitch.setVisibility(View.GONE); + } + + if (allPermissionsGranted()) { + createCameraSource(selectedModel); + } else { + getRuntimePermissions(); + } + + + Button addButton = (Button) findViewById(R.id.add); + + addButton.setOnClickListener(new View.OnClickListener() { + + @Override + public void onClick(View view) { + showAlertDialog(); + } + }); + + } + + private void showAlertDialog() { + AlertDialog.Builder builder = new AlertDialog.Builder(this); + builder.setTitle("Enter name"); + + final EditText input = new EditText(this); + input.setInputType(InputType.TYPE_CLASS_TEXT);// | InputType.TYPE_TEXT_VARIATION_PASSWORD); + builder.setView(input); + + builder.setPositiveButton("OK", new DialogInterface.OnClickListener() { + @Override + public void onClick(DialogInterface dialog, int which) { + name = input.getText().toString(); + System.out.println("INPUT: " + name); + + //Set Text + TextView textView = (TextView) findViewById(R.id.text); + textView.setText(name); + + //Crop face image + } + }); + builder.setNegativeButton("Cancel", new DialogInterface.OnClickListener() { + @Override + public void onClick(DialogInterface dialog, int which) { + dialog.cancel(); + System.out.println("INPUT: canceld"); + } + }); + + builder.show(); + } + + @Override + public synchronized void onItemSelected(AdapterView parent, View view, int pos, long id) { + // An item was selected. You can retrieve the selected item using + // parent.getItemAtPosition(pos) + selectedModel = parent.getItemAtPosition(pos).toString(); + Log.d(TAG, "Selected model: " + selectedModel); + preview.stop(); + if (allPermissionsGranted()) { + createCameraSource(selectedModel); + cameraSource.setFacing(CameraSource.CAMERA_FACING_FRONT); + startCameraSource(); + } else { + getRuntimePermissions(); + } + } + + @Override + public void onNothingSelected(AdapterView parent) { + // Do nothing. 
+ } + + @Override + public void onCheckedChanged(CompoundButton buttonView, boolean isChecked) { + Log.d(TAG, "Set facing"); + if (cameraSource != null) { + if (isChecked) { + cameraSource.setFacing(CameraSource.CAMERA_FACING_FRONT); + } else { + cameraSource.setFacing(CameraSource.CAMERA_FACING_BACK); + } + } + preview.stop(); + startCameraSource(); + } + + private void createCameraSource(String model) { + // If there's no existing cameraSource, create one. + if (cameraSource == null) { + cameraSource = new CameraSource(this, graphicOverlay); + } + + switch (model) { + case FACE_DETECTION: + Log.i(TAG, "Using Face Detector Processor"); + cameraSource.setMachineLearningFrameProcessor(new FaceDetectionProcessor()); + break; + case FACE_CONTOUR: + Log.i(TAG, "Using Face Contour Detector Processor"); + //cameraSource.setMachineLearningFrameProcessor(new FaceContourDetectorProcessor()); + break; + default: + break; + } + } + + /** + * Starts or restarts the camera source, if it exists. If the camera source doesn't exist yet + * (e.g., because onResume was called before the camera source was created), this will be called + * again when the camera source is created. + */ + private void startCameraSource() { + if (cameraSource != null) { + try { + if (preview == null) { + Log.d(TAG, "resume: Preview is null"); + } + if (graphicOverlay == null) { + Log.d(TAG, "resume: graphOverlay is null"); + } + preview.start(cameraSource, graphicOverlay); + } catch (IOException e) { + Log.e(TAG, "Unable to start camera source.", e); + cameraSource.release(); + cameraSource = null; + } + } + } + + @Override + public void onResume() { + super.onResume(); + Log.d(TAG, "onResume"); + startCameraSource(); + } + + /** Stops the camera. 
*/ + @Override + protected void onPause() { + super.onPause(); + preview.stop(); + } + + @Override + public void onDestroy() { + super.onDestroy(); + if (cameraSource != null) { + cameraSource.release(); + } + } + + private String[] getRequiredPermissions() { + try { + PackageInfo info = + this.getPackageManager() + .getPackageInfo(this.getPackageName(), PackageManager.GET_PERMISSIONS); + String[] ps = info.requestedPermissions; + if (ps != null && ps.length > 0) { + return ps; + } else { + return new String[0]; + } + } catch (Exception e) { + return new String[0]; + } + } + + private boolean allPermissionsGranted() { + for (String permission : getRequiredPermissions()) { + if (!isPermissionGranted(this, permission)) { + return false; + } + } + return true; + } + + private void getRuntimePermissions() { + List allNeededPermissions = new ArrayList<>(); + for (String permission : getRequiredPermissions()) { + if (!isPermissionGranted(this, permission)) { + allNeededPermissions.add(permission); + } + } + + if (!allNeededPermissions.isEmpty()) { + ActivityCompat.requestPermissions( + this, allNeededPermissions.toArray(new String[0]), PERMISSION_REQUESTS); + } + } + + @Override + public void onRequestPermissionsResult( + int requestCode, String[] permissions, int[] grantResults) { + Log.i(TAG, "Permission granted!"); + if (allPermissionsGranted()) { + createCameraSource(selectedModel); + } + super.onRequestPermissionsResult(requestCode, permissions, grantResults); + } + + private static boolean isPermissionGranted(Context context, String permission) { + if (ContextCompat.checkSelfPermission(context, permission) + == PackageManager.PERMISSION_GRANTED) { + Log.i(TAG, "Permission granted: " + permission); + return true; + } + Log.i(TAG, "Permission NOT granted: " + permission); + return false; + } +} diff --git a/app/src/main/java/com/themon/test/mlkit_faciallandmarks/VisionProcessorBase.java 
b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/VisionProcessorBase.java new file mode 100644 index 0000000..9594de3 --- /dev/null +++ b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/VisionProcessorBase.java @@ -0,0 +1,165 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package com.themon.test.mlkit_faciallandmarks; + +import android.graphics.Bitmap; +import android.support.annotation.GuardedBy; +import android.support.annotation.NonNull; +import android.support.annotation.Nullable; + +import com.google.android.gms.tasks.OnFailureListener; +import com.google.android.gms.tasks.OnSuccessListener; +import com.google.android.gms.tasks.Task; +import com.google.firebase.ml.vision.common.FirebaseVisionImage; +import com.google.firebase.ml.vision.common.FirebaseVisionImageMetadata; +import com.google.firebase.ml.vision.face.FirebaseVisionFace; +import com.themon.test.mlkit_faciallandmarks.common.BitmapUtils; +import com.themon.test.mlkit_faciallandmarks.common.FrameMetadata; +import com.themon.test.mlkit_faciallandmarks.common.GraphicOverlay; +import com.themon.test.mlkit_faciallandmarks.common.VisionImageProcessor; + +import java.nio.ByteBuffer; +import java.util.List; + +/** + * Abstract base class for ML Kit frame processors. 
Subclasses need to implement {@link + * #onSuccess(Bitmap, Object, FrameMetadata, GraphicOverlay)} to define what they want to with + * the detection results and {@link #detectInImage(FirebaseVisionImage)} to specify the detector + * object. + * + * @param The type of the detected feature. + */ +public abstract class VisionProcessorBase implements VisionImageProcessor { + + // To keep the latest images and its metadata. + @GuardedBy("this") + private ByteBuffer latestImage; + + @GuardedBy("this") + private FrameMetadata latestImageMetaData; + + // To keep the images and metadata in process. + @GuardedBy("this") + private ByteBuffer processingImage; + + @GuardedBy("this") + + private FrameMetadata processingMetaData; + + public VisionProcessorBase() { + } + + public ByteBuffer getLatestImage() { + return this.processingImage; + } + + public FrameMetadata getLatestImageMetadata() { + return this.processingMetaData; + } + + protected List faces; + public List getFaces() { + return faces; + } + + @Override + public synchronized void process( + ByteBuffer data, final FrameMetadata frameMetadata, final GraphicOverlay graphicOverlay) { + latestImage = data; + latestImageMetaData = frameMetadata; + if (processingImage == null && processingMetaData == null) { + processLatestImage(graphicOverlay); + } + } + + // Bitmap version + @Override + public void process(Bitmap bitmap, final GraphicOverlay + graphicOverlay) { + detectInVisionImage(null /* bitmap */, FirebaseVisionImage.fromBitmap(bitmap), null, + graphicOverlay); + } + + private synchronized void processLatestImage(final GraphicOverlay graphicOverlay) { + processingImage = latestImage; + processingMetaData = latestImageMetaData; + latestImage = null; + latestImageMetaData = null; + if (processingImage != null && processingMetaData != null) { + processImage(processingImage, processingMetaData, graphicOverlay); + } + } + + private void processImage( + ByteBuffer data, final FrameMetadata frameMetadata, + final 
GraphicOverlay graphicOverlay) { + FirebaseVisionImageMetadata metadata = + new FirebaseVisionImageMetadata.Builder() + .setFormat(FirebaseVisionImageMetadata.IMAGE_FORMAT_NV21) + .setWidth(frameMetadata.getWidth()) + .setHeight(frameMetadata.getHeight()) + .setRotation(frameMetadata.getRotation()) + .build(); + + Bitmap bitmap = BitmapUtils.getBitmap(data, frameMetadata); + detectInVisionImage( + bitmap, FirebaseVisionImage.fromByteBuffer(data, metadata), frameMetadata, + graphicOverlay); + } + + private void detectInVisionImage( + final Bitmap originalCameraImage, + FirebaseVisionImage image, + final FrameMetadata metadata, + final GraphicOverlay graphicOverlay) { + detectInImage(image) + .addOnSuccessListener( + new OnSuccessListener() { + @Override + public void onSuccess(T results) { + VisionProcessorBase.this.onSuccess(originalCameraImage, results, + metadata, + graphicOverlay); + processLatestImage(graphicOverlay); + } + }) + .addOnFailureListener( + new OnFailureListener() { + @Override + public void onFailure(@NonNull Exception e) { + VisionProcessorBase.this.onFailure(e); + } + }); + } + + @Override + public void stop() { + } + + protected abstract Task detectInImage(FirebaseVisionImage image); + + /** + * Callback that executes with a successful detection result. + * + * @param originalCameraImage hold the original image from camera, used to draw the background + * image. 
+ */ + protected abstract void onSuccess( + @Nullable Bitmap originalCameraImage, + @NonNull T results, + @NonNull FrameMetadata frameMetadata, + @NonNull GraphicOverlay graphicOverlay); + + protected abstract void onFailure(@NonNull Exception e); +} diff --git a/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/BitmapUtils.java b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/BitmapUtils.java new file mode 100644 index 0000000..f401738 --- /dev/null +++ b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/BitmapUtils.java @@ -0,0 +1,75 @@ +package com.themon.test.mlkit_faciallandmarks.common; + +import android.graphics.Bitmap; +import android.graphics.BitmapFactory; +import android.graphics.ImageFormat; +import android.graphics.Matrix; +import android.graphics.Rect; +import android.graphics.YuvImage; +import android.hardware.Camera.CameraInfo; +import android.support.annotation.Nullable; +import android.util.Log; + +import com.google.firebase.ml.vision.common.FirebaseVisionImageMetadata; + +import java.io.ByteArrayOutputStream; +import java.nio.ByteBuffer; + +/** Utils functions for bitmap conversions. */ +public class BitmapUtils { + + // Convert NV21 format byte buffer to bitmap. 
+ @Nullable + public static Bitmap getBitmap(ByteBuffer data, FrameMetadata metadata) { + data.rewind(); + byte[] imageInBuffer = new byte[data.limit()]; + data.get(imageInBuffer, 0, imageInBuffer.length); + try { + YuvImage image = + new YuvImage( + imageInBuffer, ImageFormat.NV21, metadata.getWidth(), metadata.getHeight(), null); + if (image != null) { + ByteArrayOutputStream stream = new ByteArrayOutputStream(); + image.compressToJpeg(new Rect(0, 0, metadata.getWidth(), metadata.getHeight()), 80, stream); + + Bitmap bmp = BitmapFactory.decodeByteArray(stream.toByteArray(), 0, stream.size()); + + stream.close(); + return rotateBitmap(bmp, metadata.getRotation(), metadata.getCameraFacing()); + } + } catch (Exception e) { + Log.e("VisionProcessorBase", "Error: " + e.getMessage()); + } + return null; + } + + // Rotates a bitmap if it is converted from a bytebuffer. + private static Bitmap rotateBitmap(Bitmap bitmap, int rotation, int facing) { + Matrix matrix = new Matrix(); + int rotationDegree = 0; + switch (rotation) { + case FirebaseVisionImageMetadata.ROTATION_90: + rotationDegree = 90; + break; + case FirebaseVisionImageMetadata.ROTATION_180: + rotationDegree = 180; + break; + case FirebaseVisionImageMetadata.ROTATION_270: + rotationDegree = 270; + break; + default: + break; + } + + // Rotate the image back to straight.} + matrix.postRotate(rotationDegree); + if (facing == CameraInfo.CAMERA_FACING_BACK) { + return Bitmap.createBitmap(bitmap, 0, 0, bitmap.getWidth(), bitmap.getHeight(), matrix, true); + } else { + // Mirror the image along X axis for front-facing camera image. 
+ matrix.postScale(-1.0f, 1.0f); + return Bitmap.createBitmap(bitmap, 0, 0, bitmap.getWidth(), bitmap.getHeight(), matrix, true); + } + } +} + diff --git a/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/CameraImageGraphic.java b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/CameraImageGraphic.java new file mode 100644 index 0000000..95fd0a4 --- /dev/null +++ b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/CameraImageGraphic.java @@ -0,0 +1,24 @@ +package com.themon.test.mlkit_faciallandmarks.common; + +import android.graphics.Bitmap; +import android.graphics.Canvas; +import android.graphics.Rect; + +import com.themon.test.mlkit_faciallandmarks.common.GraphicOverlay.Graphic; + +/** Draw camera image to background. */ +public class CameraImageGraphic extends Graphic { + + private final Bitmap bitmap; + + public CameraImageGraphic(GraphicOverlay overlay, Bitmap bitmap) { + super(overlay); + this.bitmap = bitmap; + } + + @Override + public void draw(Canvas canvas) { + canvas.drawBitmap(bitmap, null, new Rect(0, 0, canvas.getWidth(), canvas.getHeight()), null); + } +} + diff --git a/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/CameraSource.java b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/CameraSource.java new file mode 100644 index 0000000..27d3103 --- /dev/null +++ b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/CameraSource.java @@ -0,0 +1,745 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package com.themon.test.mlkit_faciallandmarks.common; + +import android.Manifest; +import android.annotation.SuppressLint; +import android.app.Activity; +import android.content.Context; +import android.graphics.ImageFormat; +import android.graphics.SurfaceTexture; +import android.hardware.Camera; +import android.hardware.Camera.CameraInfo; +import android.support.annotation.Nullable; +import android.support.annotation.RequiresPermission; +import android.util.Log; +import android.view.Surface; +import android.view.SurfaceHolder; +import android.view.WindowManager; + +import com.google.android.gms.common.images.Size; + +import java.io.IOException; +import java.lang.Thread.State; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.IdentityHashMap; +import java.util.List; +import java.util.Map; + +/** + * Manages the camera and allows UI updates on top of it (e.g. overlaying extra Graphics or + * displaying extra information). This receives preview frames from the camera at a specified rate, + * sending those frames to child classes' detectors / classifiers as fast as it is able to process. + */ +@SuppressLint("MissingPermission") +public class CameraSource { + @SuppressLint("InlinedApi") + public static final int CAMERA_FACING_BACK = CameraInfo.CAMERA_FACING_BACK; + + @SuppressLint("InlinedApi") + public static final int CAMERA_FACING_FRONT = CameraInfo.CAMERA_FACING_FRONT; + + private static final String TAG = "MIDemoApp:CameraSource"; + + /** + * The dummy surface texture must be assigned a chosen name. Since we never use an OpenGL context, + * we can choose any ID we want here. The dummy surface texture is not a crazy hack - it is + * actually how the camera team recommends using the camera without a preview. 
+ */ + private static final int DUMMY_TEXTURE_NAME = 100; + + /** + * If the absolute difference between a preview size aspect ratio and a picture size aspect ratio + * is less than this tolerance, they are considered to be the same aspect ratio. + */ + private static final float ASPECT_RATIO_TOLERANCE = 0.01f; + + protected Activity activity; + + private Camera camera; + + protected int facing = CAMERA_FACING_BACK; + + /** + * Rotation of the device, and thus the associated preview images captured from the device. See + * Frame.Metadata#getRotation(). + */ + private int rotation; + + private Size previewSize; + + // These values may be requested by the caller. Due to hardware limitations, we may need to + // select close, but not exactly the same values for these. + private final float requestedFps = 20.0f; + private final int requestedPreviewWidth = 480; + private final int requestedPreviewHeight = 360; + private final boolean requestedAutoFocus = true; + + // These instances need to be held onto to avoid GC of their underlying resources. Even though + // these aren't used outside of the method that creates them, they still must have hard + // references maintained to them. + private SurfaceTexture dummySurfaceTexture; + private final GraphicOverlay graphicOverlay; + + // True if a SurfaceTexture is being used for the preview, false if a SurfaceHolder is being + // used for the preview. We want to be compatible back to Gingerbread, but SurfaceTexture + // wasn't introduced until Honeycomb. Since the interface cannot use a SurfaceTexture, if the + // developer wants to display a preview we must use a SurfaceHolder. If the developer doesn't + // want to display a preview we use a SurfaceTexture if we are running at least Honeycomb. + private boolean usingSurfaceTexture; + + /** + * Dedicated thread and associated runnable for calling into the detector with frames, as the + * frames become available from the camera. 
+ */ + private Thread processingThread; + + private final FrameProcessingRunnable processingRunnable; + + private final Object processorLock = new Object(); + // @GuardedBy("processorLock") + private VisionImageProcessor frameProcessor; + + /** + * Map to convert between a byte array, received from the camera, and its associated byte buffer. + * We use byte buffers internally because this is a more efficient way to call into native code + * later (avoids a potential copy). + * + *

Note: uses IdentityHashMap here instead of HashMap because the behavior of an array's + * equals, hashCode and toString methods is both useless and unexpected. IdentityHashMap enforces + * identity ('==') check on the keys. + */ + private final Map bytesToByteBuffer = new IdentityHashMap<>(); + + public CameraSource(Activity activity, GraphicOverlay overlay) { + this.activity = activity; + graphicOverlay = overlay; + graphicOverlay.clear(); + processingRunnable = new FrameProcessingRunnable(); + + if (Camera.getNumberOfCameras() == 1) { + CameraInfo cameraInfo = new CameraInfo(); + Camera.getCameraInfo(0, cameraInfo); + facing = cameraInfo.facing; + } + } + + // ============================================================================================== + // Public + // ============================================================================================== + + /** Stops the camera and releases the resources of the camera and underlying detector. */ + public void release() { + synchronized (processorLock) { + stop(); + processingRunnable.release(); + cleanScreen(); + + if (frameProcessor != null) { + frameProcessor.stop(); + } + } + } + + /** + * Opens the camera and starts sending preview frames to the underlying detector. The preview + * frames are not displayed. 
+ * + * @throws IOException if the camera's preview texture or display could not be initialized + */ + @SuppressLint("MissingPermission") + @RequiresPermission(Manifest.permission.CAMERA) + public synchronized CameraSource start() throws IOException { + if (camera != null) { + return this; + } + + camera = createCamera(); + dummySurfaceTexture = new SurfaceTexture(DUMMY_TEXTURE_NAME); + camera.setPreviewTexture(dummySurfaceTexture); + usingSurfaceTexture = true; + camera.startPreview(); + + processingThread = new Thread(processingRunnable); + processingRunnable.setActive(true); + processingThread.start(); + return this; + } + + /** + * Opens the camera and starts sending preview frames to the underlying detector. The supplied + * surface holder is used for the preview so frames can be displayed to the user. + * + * @param surfaceHolder the surface holder to use for the preview frames + * @throws IOException if the supplied surface holder could not be used as the preview display + */ + @RequiresPermission(Manifest.permission.CAMERA) + public synchronized CameraSource start(SurfaceHolder surfaceHolder) throws IOException { + if (camera != null) { + return this; + } + + camera = createCamera(); + camera.setPreviewDisplay(surfaceHolder); + camera.startPreview(); + + processingThread = new Thread(processingRunnable); + processingRunnable.setActive(true); + processingThread.start(); + + usingSurfaceTexture = false; + return this; + } + + /** + * Closes the camera and stops sending frames to the underlying frame detector. + * + *

This camera source may be restarted again by calling {@link #start()} or {@link + * #start(SurfaceHolder)}. + * + *

Call {@link #release()} instead to completely shut down this camera source and release the + * resources of the underlying detector. + */ + public synchronized void stop() { + processingRunnable.setActive(false); + if (processingThread != null) { + try { + // Wait for the thread to complete to ensure that we can't have multiple threads + // executing at the same time (i.e., which would happen if we called start too + // quickly after stop). + processingThread.join(); + } catch (InterruptedException e) { + Log.d(TAG, "Frame processing thread interrupted on release."); + } + processingThread = null; + } + + if (camera != null) { + camera.stopPreview(); + camera.setPreviewCallbackWithBuffer(null); + try { + if (usingSurfaceTexture) { + camera.setPreviewTexture(null); + } else { + camera.setPreviewDisplay(null); + } + } catch (Exception e) { + Log.e(TAG, "Failed to clear camera preview: " + e); + } + camera.release(); + camera = null; + } + + // Release the reference to any image buffers, since these will no longer be in use. + bytesToByteBuffer.clear(); + } + + /** Changes the facing of the camera. */ + public synchronized void setFacing(int facing) { + if ((facing != CAMERA_FACING_BACK) && (facing != CAMERA_FACING_FRONT)) { + throw new IllegalArgumentException("Invalid camera: " + facing); + } + this.facing = facing; + } + + /** Returns the preview size that is currently in use by the underlying camera. */ + public Size getPreviewSize() { + return previewSize; + } + + /** + * Returns the selected camera; one of {@link #CAMERA_FACING_BACK} or {@link + * #CAMERA_FACING_FRONT}. + */ + public int getCameraFacing() { + return facing; + } + + /** + * Opens the camera and applies the user settings. 
+ * + * @throws IOException if camera cannot be found or preview cannot be processed + */ + @SuppressLint("InlinedApi") + private Camera createCamera() throws IOException { + int requestedCameraId = getIdForRequestedCamera(facing); + if (requestedCameraId == -1) { + throw new IOException("Could not find requested camera."); + } + Camera camera = Camera.open(requestedCameraId); + + SizePair sizePair = selectSizePair(camera, requestedPreviewWidth, requestedPreviewHeight); + if (sizePair == null) { + throw new IOException("Could not find suitable preview size."); + } + Size pictureSize = sizePair.pictureSize(); + previewSize = sizePair.previewSize(); + + int[] previewFpsRange = selectPreviewFpsRange(camera, requestedFps); + if (previewFpsRange == null) { + throw new IOException("Could not find suitable preview frames per second range."); + } + + Camera.Parameters parameters = camera.getParameters(); + + if (pictureSize != null) { + parameters.setPictureSize(pictureSize.getWidth(), pictureSize.getHeight()); + } + parameters.setPreviewSize(previewSize.getWidth(), previewSize.getHeight()); + parameters.setPreviewFpsRange( + previewFpsRange[Camera.Parameters.PREVIEW_FPS_MIN_INDEX], + previewFpsRange[Camera.Parameters.PREVIEW_FPS_MAX_INDEX]); + parameters.setPreviewFormat(ImageFormat.NV21); + + setRotation(camera, parameters, requestedCameraId); + + if (requestedAutoFocus) { + if (parameters + .getSupportedFocusModes() + .contains(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO)) { + parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO); + } else { + Log.i(TAG, "Camera auto focus is not supported on this device."); + } + } + + camera.setParameters(parameters); + + // Four frame buffers are needed for working with the camera: + // + // one for the frame that is currently being executed upon in doing detection + // one for the next pending frame to process immediately upon completing detection + // two for the frames that the camera uses to populate future 
preview images + // + // Through trial and error it appears that two free buffers, in addition to the two buffers + // used in this code, are needed for the camera to work properly. Perhaps the camera has + // one thread for acquiring images, and another thread for calling into user code. If only + // three buffers are used, then the camera will spew thousands of warning messages when + // detection takes a non-trivial amount of time. + camera.setPreviewCallbackWithBuffer(new CameraPreviewCallback()); + camera.addCallbackBuffer(createPreviewBuffer(previewSize)); + camera.addCallbackBuffer(createPreviewBuffer(previewSize)); + camera.addCallbackBuffer(createPreviewBuffer(previewSize)); + camera.addCallbackBuffer(createPreviewBuffer(previewSize)); + + return camera; + } + + public Camera getCamera(){ + return camera; + } + + /** + * Gets the id for the camera specified by the direction it is facing. Returns -1 if no such + * camera was found. + * + * @param facing the desired camera (front-facing or rear-facing) + */ + private static int getIdForRequestedCamera(int facing) { + CameraInfo cameraInfo = new CameraInfo(); + for (int i = 0; i < Camera.getNumberOfCameras(); ++i) { + Camera.getCameraInfo(i, cameraInfo); + if (cameraInfo.facing == facing) { + return i; + } + } + return -1; + } + + /** + * Selects the most suitable preview and picture size, given the desired width and height. + * + *

Even though we only need to find the preview size, it's necessary to find both the preview + * size and the picture size of the camera together, because these need to have the same aspect + * ratio. On some hardware, if you would only set the preview size, you will get a distorted + * image. + * + * @param camera the camera to select a preview size from + * @param desiredWidth the desired width of the camera preview frames + * @param desiredHeight the desired height of the camera preview frames + * @return the selected preview and picture size pair + */ + private static SizePair selectSizePair(Camera camera, int desiredWidth, int desiredHeight) { + List validPreviewSizes = generateValidPreviewSizeList(camera); + + // The method for selecting the best size is to minimize the sum of the differences between + // the desired values and the actual values for width and height. This is certainly not the + // only way to select the best size, but it provides a decent tradeoff between using the + // closest aspect ratio vs. using the closest pixel area. + SizePair selectedPair = null; + int minDiff = Integer.MAX_VALUE; + for (SizePair sizePair : validPreviewSizes) { + Size size = sizePair.previewSize(); + int diff = + Math.abs(size.getWidth() - desiredWidth) + Math.abs(size.getHeight() - desiredHeight); + if (diff < minDiff) { + selectedPair = sizePair; + minDiff = diff; + } + } + + return selectedPair; + } + + /** + * Stores a preview size and a corresponding same-aspect-ratio picture size. To avoid distorted + * preview images on some devices, the picture size must be set to a size that is the same aspect + * ratio as the preview size or the preview may end up being distorted. If the picture size is + * null, then there is no picture size with the same aspect ratio as the preview size. 
+ */ + private static class SizePair { + private final Size preview; + private Size picture; + + SizePair( + Camera.Size previewSize, + @Nullable Camera.Size pictureSize) { + preview = new Size(previewSize.width, previewSize.height); + if (pictureSize != null) { + picture = new Size(pictureSize.width, pictureSize.height); + } + } + + Size previewSize() { + return preview; + } + + @Nullable + Size pictureSize() { + return picture; + } + } + + /** + * Generates a list of acceptable preview sizes. Preview sizes are not acceptable if there is not + * a corresponding picture size of the same aspect ratio. If there is a corresponding picture size + * of the same aspect ratio, the picture size is paired up with the preview size. + * + *

This is necessary because even if we don't use still pictures, the still picture size must + * be set to a size that is the same aspect ratio as the preview size we choose. Otherwise, the + * preview images may be distorted on some devices. + */ + private static List generateValidPreviewSizeList(Camera camera) { + Camera.Parameters parameters = camera.getParameters(); + List supportedPreviewSizes = + parameters.getSupportedPreviewSizes(); + List supportedPictureSizes = + parameters.getSupportedPictureSizes(); + List validPreviewSizes = new ArrayList<>(); + for (Camera.Size previewSize : supportedPreviewSizes) { + float previewAspectRatio = (float) previewSize.width / (float) previewSize.height; + + // By looping through the picture sizes in order, we favor the higher resolutions. + // We choose the highest resolution in order to support taking the full resolution + // picture later. + for (Camera.Size pictureSize : supportedPictureSizes) { + float pictureAspectRatio = (float) pictureSize.width / (float) pictureSize.height; + if (Math.abs(previewAspectRatio - pictureAspectRatio) < ASPECT_RATIO_TOLERANCE) { + validPreviewSizes.add(new SizePair(previewSize, pictureSize)); + break; + } + } + } + + // If there are no picture sizes with the same aspect ratio as any preview sizes, allow all + // of the preview sizes and hope that the camera can handle it. Probably unlikely, but we + // still account for it. + if (validPreviewSizes.size() == 0) { + Log.w(TAG, "No preview sizes have a corresponding same-aspect-ratio picture size"); + for (Camera.Size previewSize : supportedPreviewSizes) { + // The null picture size will let us know that we shouldn't set a picture size. + validPreviewSizes.add(new SizePair(previewSize, null)); + } + } + + return validPreviewSizes; + } + + /** + * Selects the most suitable preview frames per second range, given the desired frames per second. 
+ * + * @param camera the camera to select a frames per second range from + * @param desiredPreviewFps the desired frames per second for the camera preview frames + * @return the selected preview frames per second range + */ + @SuppressLint("InlinedApi") + private static int[] selectPreviewFpsRange(Camera camera, float desiredPreviewFps) { + // The camera API uses integers scaled by a factor of 1000 instead of floating-point frame + // rates. + int desiredPreviewFpsScaled = (int) (desiredPreviewFps * 1000.0f); + + // The method for selecting the best range is to minimize the sum of the differences between + // the desired value and the upper and lower bounds of the range. This may select a range + // that the desired value is outside of, but this is often preferred. For example, if the + // desired frame rate is 29.97, the range (30, 30) is probably more desirable than the + // range (15, 30). + int[] selectedFpsRange = null; + int minDiff = Integer.MAX_VALUE; + List previewFpsRangeList = camera.getParameters().getSupportedPreviewFpsRange(); + for (int[] range : previewFpsRangeList) { + int deltaMin = desiredPreviewFpsScaled - range[Camera.Parameters.PREVIEW_FPS_MIN_INDEX]; + int deltaMax = desiredPreviewFpsScaled - range[Camera.Parameters.PREVIEW_FPS_MAX_INDEX]; + int diff = Math.abs(deltaMin) + Math.abs(deltaMax); + if (diff < minDiff) { + selectedFpsRange = range; + minDiff = diff; + } + } + return selectedFpsRange; + } + + /** + * Calculates the correct rotation for the given camera id and sets the rotation in the + * parameters. It also sets the camera's display orientation and rotation. 
+ * + * @param parameters the camera parameters for which to set the rotation + * @param cameraId the camera id to set rotation based on + */ + private void setRotation(Camera camera, Camera.Parameters parameters, int cameraId) { + WindowManager windowManager = (WindowManager) activity.getSystemService(Context.WINDOW_SERVICE); + int degrees = 0; + int rotation = windowManager.getDefaultDisplay().getRotation(); + switch (rotation) { + case Surface.ROTATION_0: + degrees = 0; + break; + case Surface.ROTATION_90: + degrees = 90; + break; + case Surface.ROTATION_180: + degrees = 180; + break; + case Surface.ROTATION_270: + degrees = 270; + break; + default: + Log.e(TAG, "Bad rotation value: " + rotation); + } + + CameraInfo cameraInfo = new CameraInfo(); + Camera.getCameraInfo(cameraId, cameraInfo); + + int angle; + int displayAngle; + if (cameraInfo.facing == CameraInfo.CAMERA_FACING_FRONT) { + angle = (cameraInfo.orientation + degrees) % 360; + displayAngle = (360 - angle) % 360; // compensate for it being mirrored + } else { // back-facing + angle = (cameraInfo.orientation - degrees + 360) % 360; + displayAngle = angle; + } + + // This corresponds to the rotation constants. + this.rotation = angle / 90; + + camera.setDisplayOrientation(displayAngle); + parameters.setRotation(angle); + } + + /** + * Creates one buffer for the camera preview callback. The size of the buffer is based off of the + * camera preview size and the format of the camera image. 
+ * + * @return a new preview buffer of the appropriate size for the current camera settings + */ + @SuppressLint("InlinedApi") + private byte[] createPreviewBuffer(Size previewSize) { + int bitsPerPixel = ImageFormat.getBitsPerPixel(ImageFormat.NV21); + long sizeInBits = (long) previewSize.getHeight() * previewSize.getWidth() * bitsPerPixel; + int bufferSize = (int) Math.ceil(sizeInBits / 8.0d) + 1; + + // Creating the byte array this way and wrapping it, as opposed to using .allocate(), + // should guarantee that there will be an array to work with. + byte[] byteArray = new byte[bufferSize]; + ByteBuffer buffer = ByteBuffer.wrap(byteArray); + if (!buffer.hasArray() || (buffer.array() != byteArray)) { + // I don't think that this will ever happen. But if it does, then we wouldn't be + // passing the preview content to the underlying detector later. + throw new IllegalStateException("Failed to create valid buffer for camera source."); + } + + bytesToByteBuffer.put(byteArray, buffer); + return byteArray; + } + + // ============================================================================================== + // Frame processing + // ============================================================================================== + + /** Called when the camera has a new preview frame. */ + private class CameraPreviewCallback implements Camera.PreviewCallback { + @Override + public void onPreviewFrame(byte[] data, Camera camera) { + processingRunnable.setNextFrame(data, camera); + } + } + + public void setMachineLearningFrameProcessor(VisionImageProcessor processor) { + synchronized (processorLock) { + cleanScreen(); + if (frameProcessor != null) { + frameProcessor.stop(); + } + frameProcessor = processor; + } + } + + public VisionImageProcessor getMachineLearningFrameProcessor() { + return frameProcessor; + } + + /** + * This runnable controls access to the underlying receiver, calling it to process frames when + * available from the camera. 
This is designed to run detection on frames as fast as possible + * (i.e., without unnecessary context switching or waiting on the next frame). + * + *
<p>
While detection is running on a frame, new frames may be received from the camera. As these + * frames come in, the most recent frame is held onto as pending. As soon as detection and its + * associated processing is done for the previous frame, detection on the mostly recently received + * frame will immediately start on the same thread. + */ + private class FrameProcessingRunnable implements Runnable { + + // This lock guards all of the member variables below. + private final Object lock = new Object(); + private boolean active = true; + + // These pending variables hold the state associated with the new frame awaiting processing. + private ByteBuffer pendingFrameData; + + FrameProcessingRunnable() {} + + /** + * Releases the underlying receiver. This is only safe to do after the associated thread has + * completed, which is managed in camera source's release method above. + */ + @SuppressLint("Assert") + void release() { + assert (processingThread.getState() == State.TERMINATED); + } + + /** Marks the runnable as active/not active. Signals any blocked threads to continue. */ + void setActive(boolean active) { + synchronized (lock) { + this.active = active; + lock.notifyAll(); + } + } + + /** + * Sets the frame data received from the camera. This adds the previous unused frame buffer (if + * present) back to the camera, and keeps a pending reference to the frame data for future use. + */ + void setNextFrame(byte[] data, Camera camera) { + synchronized (lock) { + if (pendingFrameData != null) { + camera.addCallbackBuffer(pendingFrameData.array()); + pendingFrameData = null; + } + + if (!bytesToByteBuffer.containsKey(data)) { + Log.d( + TAG, + "Skipping frame. Could not find ByteBuffer associated with the image " + + "data from the camera."); + return; + } + + pendingFrameData = bytesToByteBuffer.get(data); + + // Notify the processor thread if it is waiting on the next frame (see below). 
+ lock.notifyAll(); + } + } + + /** + * As long as the processing thread is active, this executes detection on frames continuously. + * The next pending frame is either immediately available or hasn't been received yet. Once it + * is available, we transfer the frame info to local variables and run detection on that frame. + * It immediately loops back for the next frame without pausing. + * + *
<p>
If detection takes longer than the time in between new frames from the camera, this will + * mean that this loop will run without ever waiting on a frame, avoiding any context switching + * or frame acquisition time latency. + * + *
<p>
If you find that this is using more CPU than you'd like, you should probably decrease the + * FPS setting above to allow for some idle time in between frames. + */ + @SuppressLint("InlinedApi") + @SuppressWarnings("GuardedBy") + @Override + public void run() { + ByteBuffer data; + + while (true) { + synchronized (lock) { + while (active && (pendingFrameData == null)) { + try { + // Wait for the next frame to be received from the camera, since we + // don't have it yet. + lock.wait(); + } catch (InterruptedException e) { + Log.d(TAG, "Frame processing loop terminated.", e); + return; + } + } + + if (!active) { + // Exit the loop once this camera source is stopped or released. We check + // this here, immediately after the wait() above, to handle the case where + // setActive(false) had been called, triggering the termination of this + // loop. + return; + } + + // Hold onto the frame data locally, so that we can use this for detection + // below. We need to clear pendingFrameData to ensure that this buffer isn't + // recycled back to the camera before we are done using that data. + data = pendingFrameData; + pendingFrameData = null; + } + + // The code below needs to run outside of synchronization, because this will allow + // the camera to add pending frame(s) while we are running detection on the current + // frame. + + try { + synchronized (processorLock) { + Log.d(TAG, "Process an image"); + frameProcessor.process( + data, + new FrameMetadata.Builder() + .setWidth(previewSize.getWidth()) + .setHeight(previewSize.getHeight()) + .setRotation(rotation) + .setCameraFacing(facing) + .build(), + graphicOverlay); + } + } catch (Throwable t) { + Log.e(TAG, "Exception thrown from receiver.", t); + } finally { + camera.addCallbackBuffer(data.array()); + } + } + } + } + + /** Cleans up graphicOverlay and child classes can do their cleanups as well . 
*/ + private void cleanScreen() { + graphicOverlay.clear(); + } +} diff --git a/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/CameraSourcePreview.java b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/CameraSourcePreview.java new file mode 100644 index 0000000..90bfd05 --- /dev/null +++ b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/CameraSourcePreview.java @@ -0,0 +1,180 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package com.themon.test.mlkit_faciallandmarks.common; + +import android.annotation.SuppressLint; +import android.content.Context; +import android.content.res.Configuration; +import android.util.AttributeSet; +import android.util.Log; +import android.view.SurfaceHolder; +import android.view.SurfaceView; +import android.view.ViewGroup; + +import com.google.android.gms.common.images.Size; + +import java.io.IOException; + +/** Preview the camera image in the screen. 
*/ +public class CameraSourcePreview extends ViewGroup { + private static final String TAG = "MIDemoApp:Preview"; + + private Context context; + private SurfaceView surfaceView; + private boolean startRequested; + private boolean surfaceAvailable; + private CameraSource cameraSource; + + private GraphicOverlay overlay; + + public CameraSourcePreview(Context context, AttributeSet attrs) { + super(context, attrs); + this.context = context; + startRequested = false; + surfaceAvailable = false; + + surfaceView = new SurfaceView(context); + surfaceView.getHolder().addCallback(new SurfaceCallback()); + addView(surfaceView); + } + + public void start(CameraSource cameraSource) throws IOException { + if (cameraSource == null) { + stop(); + } + + this.cameraSource = cameraSource; + + if (this.cameraSource != null) { + startRequested = true; + startIfReady(); + } + } + + public void start(CameraSource cameraSource, GraphicOverlay overlay) throws IOException { + this.overlay = overlay; + start(cameraSource); + } + + public void stop() { + if (cameraSource != null) { + cameraSource.stop(); + } + } + + public void release() { + if (cameraSource != null) { + cameraSource.release(); + cameraSource = null; + } + } + + @SuppressLint("MissingPermission") + private void startIfReady() throws IOException { + if (startRequested && surfaceAvailable) { + cameraSource.start(); + if (overlay != null) { + Size size = cameraSource.getPreviewSize(); + int min = Math.min(size.getWidth(), size.getHeight()); + int max = Math.max(size.getWidth(), size.getHeight()); + if (isPortraitMode()) { + // Swap width and height sizes when in portrait, since it will be rotated by + // 90 degrees + overlay.setCameraInfo(min, max, cameraSource.getCameraFacing()); + } else { + overlay.setCameraInfo(max, min, cameraSource.getCameraFacing()); + } + overlay.clear(); + } + startRequested = false; + } + } + + private class SurfaceCallback implements SurfaceHolder.Callback { + @Override + public void 
surfaceCreated(SurfaceHolder surface) { + surfaceAvailable = true; + try { + startIfReady(); + } catch (IOException e) { + Log.e(TAG, "Could not start camera source.", e); + } + } + + @Override + public void surfaceDestroyed(SurfaceHolder surface) { + surfaceAvailable = false; + } + + @Override + public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {} + } + + @Override + protected void onLayout(boolean changed, int left, int top, int right, int bottom) { + int width = 320; + int height = 240; + if (cameraSource != null) { + Size size = cameraSource.getPreviewSize(); + if (size != null) { + width = size.getWidth(); + height = size.getHeight(); + } + } + + // Swap width and height sizes when in portrait, since it will be rotated 90 degrees + if (isPortraitMode()) { + int tmp = width; + width = height; + height = tmp; + } + + final int layoutWidth = right - left; + final int layoutHeight = bottom - top; + + // Computes height and width for potentially doing fit width. + int childWidth = layoutWidth; + int childHeight = (int) (((float) layoutWidth / (float) width) * height); + + // If height is too tall using fit width, does fit height instead. 
+ if (childHeight > layoutHeight) { + childHeight = layoutHeight; + childWidth = (int) (((float) layoutHeight / (float) height) * width); + } + + for (int i = 0; i < getChildCount(); ++i) { + getChildAt(i).layout(0, 0, childWidth, childHeight); + Log.d(TAG, "Assigned view: " + i); + } + + try { + startIfReady(); + } catch (IOException e) { + Log.e(TAG, "Could not start camera source.", e); + } + } + + private boolean isPortraitMode() { + int orientation = context.getResources().getConfiguration().orientation; + if (orientation == Configuration.ORIENTATION_LANDSCAPE) { + return false; + } + if (orientation == Configuration.ORIENTATION_PORTRAIT) { + return true; + } + + Log.d(TAG, "isPortraitMode returning false by default"); + return false; + } +} diff --git a/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/FrameMetadata.java b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/FrameMetadata.java new file mode 100644 index 0000000..7a66f79 --- /dev/null +++ b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/FrameMetadata.java @@ -0,0 +1,79 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package com.themon.test.mlkit_faciallandmarks.common; + +/** Describing a frame info. 
*/
+public class FrameMetadata {
+
+ private final int width; // preview frame width in pixels
+ private final int height; // preview frame height in pixels
+ private final int rotation; // frame rotation constant — presumably FirebaseVisionImageMetadata ROTATION_*; verify against caller
+ private final int cameraFacing; // Camera.CameraInfo CAMERA_FACING_* constant
+
+ public int getWidth() {
+ return width;
+ }
+
+ public int getHeight() {
+ return height;
+ }
+
+ public int getRotation() {
+ return rotation;
+ }
+
+ public int getCameraFacing() {
+ return cameraFacing;
+ }
+
+ private FrameMetadata(int width, int height, int rotation, int facing) {
+ this.width = width;
+ this.height = height;
+ this.rotation = rotation;
+ cameraFacing = facing;
+ }
+
+ /** Builder of {@link FrameMetadata}. Instances are immutable once built. */
+ public static class Builder {
+
+ private int width;
+ private int height;
+ private int rotation;
+ private int cameraFacing;
+
+ public Builder setWidth(int width) {
+ this.width = width;
+ return this;
+ }
+
+ public Builder setHeight(int height) {
+ this.height = height;
+ return this;
+ }
+
+ public Builder setRotation(int rotation) {
+ this.rotation = rotation;
+ return this;
+ }
+
+ public Builder setCameraFacing(int facing) {
+ cameraFacing = facing;
+ return this;
+ }
+
+ public FrameMetadata build() {
+ return new FrameMetadata(width, height, rotation, cameraFacing);
+ }
+ }
+}
diff --git a/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/GraphicOverlay.java b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/GraphicOverlay.java
new file mode 100644
index 0000000..ff8ce3b
--- /dev/null
+++ b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/GraphicOverlay.java
@@ -0,0 +1,177 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package com.themon.test.mlkit_faciallandmarks.common; + +import android.content.Context; +import android.graphics.Canvas; +import android.util.AttributeSet; +import android.view.View; + +import com.google.android.gms.vision.CameraSource; + +import java.util.ArrayList; +import java.util.List; + +/** + * A view which renders a series of custom graphics to be overlayed on top of an associated preview + * (i.e., the camera preview). The creator can add graphics objects, update the objects, and remove + * them, triggering the appropriate drawing and invalidation within the view. + * + *
<p>
Supports scaling and mirroring of the graphics relative the camera's preview properties. The + * idea is that detection items are expressed in terms of a preview size, but need to be scaled up + * to the full view size, and also mirrored in the case of the front-facing camera. + * + *
<p>
Associated {@link Graphic} items should use the following methods to convert to view + * coordinates for the graphics that are drawn: + * + *
<ol>
 + *   <li>{@link Graphic#scaleX(float)} and {@link Graphic#scaleY(float)} adjust the size of the
 + * supplied value from the preview scale to the view scale.
 + *   <li>{@link Graphic#translateX(float)} and {@link Graphic#translateY(float)} adjust the
 + * coordinate from the preview's coordinate system to the view coordinate system.
 + * </ol>
+ */ +public class GraphicOverlay extends View { + private final Object lock = new Object(); + private int previewWidth; + private float widthScaleFactor = 1.0f; + private int previewHeight; + private float heightScaleFactor = 1.0f; + private int facing = CameraSource.CAMERA_FACING_BACK; + private final List graphics = new ArrayList<>(); + + /** + * Base class for a custom graphics object to be rendered within the graphic overlay. Subclass + * this and implement the {@link Graphic#draw(Canvas)} method to define the graphics element. Add + * instances to the overlay using {@link GraphicOverlay#add(Graphic)}. + */ + public abstract static class Graphic { + private GraphicOverlay overlay; + + public Graphic(GraphicOverlay overlay) { + this.overlay = overlay; + } + + /** + * Draw the graphic on the supplied canvas. Drawing should use the following methods to convert + * to view coordinates for the graphics that are drawn: + * + *
<ol>
 + *   <li>{@link Graphic#scaleX(float)} and {@link Graphic#scaleY(float)} adjust the size of the
 + * supplied value from the preview scale to the view scale.
 + *   <li>{@link Graphic#translateX(float)} and {@link Graphic#translateY(float)} adjust the
 + * coordinate from the preview's coordinate system to the view coordinate system.
 + * </ol>
+ * + * @param canvas drawing canvas + */ + public abstract void draw(Canvas canvas); + + /** + * Adjusts a horizontal value of the supplied value from the preview scale to the view scale. + */ + public float scaleX(float horizontal) { + return horizontal * overlay.widthScaleFactor; + } + + /** Adjusts a vertical value of the supplied value from the preview scale to the view scale. */ + public float scaleY(float vertical) { + return vertical * overlay.heightScaleFactor; + } + + /** Returns the application context of the app. */ + public Context getApplicationContext() { + return overlay.getContext().getApplicationContext(); + } + + /** + * Adjusts the x coordinate from the preview's coordinate system to the view coordinate system. + */ + public float translateX(float x) { + if (overlay.facing == CameraSource.CAMERA_FACING_FRONT) { + return overlay.getWidth() - scaleX(x); + } else { + return scaleX(x); + } + } + + /** + * Adjusts the y coordinate from the preview's coordinate system to the view coordinate system. + */ + public float translateY(float y) { + return scaleY(y); + } + + public void postInvalidate() { + overlay.postInvalidate(); + } + } + + public GraphicOverlay(Context context, AttributeSet attrs) { + super(context, attrs); + } + + /** Removes all graphics from the overlay. */ + public void clear() { + synchronized (lock) { + graphics.clear(); + } + postInvalidate(); + } + + /** Adds a graphic to the overlay. */ + public void add(Graphic graphic) { + synchronized (lock) { + graphics.add(graphic); + } + } + + /** Removes a graphic from the overlay. */ + public void remove(Graphic graphic) { + synchronized (lock) { + graphics.remove(graphic); + } + postInvalidate(); + } + + /** + * Sets the camera attributes for size and facing direction, which informs how to transform image + * coordinates later. 
+ */ + public void setCameraInfo(int previewWidth, int previewHeight, int facing) { + synchronized (lock) { + this.previewWidth = previewWidth; + this.previewHeight = previewHeight; + this.facing = facing; + } + postInvalidate(); + } + + /** Draws the overlay with its associated graphic objects. */ + @Override + protected void onDraw(Canvas canvas) { + super.onDraw(canvas); + + synchronized (lock) { + if ((previewWidth != 0) && (previewHeight != 0)) { + widthScaleFactor = (float) canvas.getWidth() / (float) previewWidth; + heightScaleFactor = (float) canvas.getHeight() / (float) previewHeight; + } + + for (Graphic graphic : graphics) { + graphic.draw(canvas); + } + } + } +} diff --git a/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/VisionImageProcessor.java b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/VisionImageProcessor.java new file mode 100644 index 0000000..6265450 --- /dev/null +++ b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/common/VisionImageProcessor.java @@ -0,0 +1,40 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+package com.themon.test.mlkit_faciallandmarks.common;
+
+import android.graphics.Bitmap;
+
+import com.google.firebase.ml.common.FirebaseMLException;
+import com.google.firebase.ml.vision.face.FirebaseVisionFace;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+
+/** An interface to process the images with different ML Kit detectors and custom image models. */
+public interface VisionImageProcessor {
+
+ ByteBuffer getLatestImage(); // most recently submitted frame data — semantics defined by implementations; verify
+ FrameMetadata getLatestImageMetadata(); // metadata matching the frame from getLatestImage()
+ List getFaces(); // NOTE(review): raw type — generic parameter appears stripped in this patch; likely List<FirebaseVisionFace>
+
+ /** Processes the images with the underlying machine learning models. */
+ void process(ByteBuffer data, FrameMetadata frameMetadata, GraphicOverlay graphicOverlay)
+ throws FirebaseMLException;
+
+ /** Processes the bitmap images. */
+ void process(Bitmap bitmap, GraphicOverlay graphicOverlay);
+
+ /** Stops the underlying machine learning model and releases resources. */
+ void stop();
+}
diff --git a/app/src/main/java/com/themon/test/mlkit_faciallandmarks/facedetection/FaceContourDetectorProcessor.java b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/facedetection/FaceContourDetectorProcessor.java
new file mode 100644
index 0000000..0909590
--- /dev/null
+++ b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/facedetection/FaceContourDetectorProcessor.java
@@ -0,0 +1,90 @@
+package com.themon.test.mlkit_faciallandmarks.facedetection;
+
+import android.graphics.Bitmap;
+import android.support.annotation.NonNull;
+import android.support.annotation.Nullable;
+import android.util.Log;
+
+import com.google.android.gms.tasks.Task;
+import com.google.firebase.ml.vision.FirebaseVision;
+import com.google.firebase.ml.vision.common.FirebaseVisionImage;
+import com.google.firebase.ml.vision.face.FirebaseVisionFace;
+import com.google.firebase.ml.vision.face.FirebaseVisionFaceDetector;
+import com.google.firebase.ml.vision.face.FirebaseVisionFaceDetectorOptions;
+import com.themon.test.mlkit_faciallandmarks.VisionProcessorBase;
+import 
com.themon.test.mlkit_faciallandmarks.common.CameraImageGraphic; +import com.themon.test.mlkit_faciallandmarks.common.FrameMetadata; +import com.themon.test.mlkit_faciallandmarks.common.GraphicOverlay; + +import java.io.IOException; +import java.util.List; + +/** + * Face Contour Demo. + */ +public class FaceContourDetectorProcessor extends VisionProcessorBase> { + + private static final String TAG = "FaceContourDetectorProc"; + + private final FirebaseVisionFaceDetector detector; + //private PersonRecognizer mPersonRecog; + + public FaceContourDetectorProcessor(/*PersonRecognizer pr*/) { + FirebaseVisionFaceDetectorOptions options = + new FirebaseVisionFaceDetectorOptions.Builder() + .setPerformanceMode(FirebaseVisionFaceDetectorOptions.FAST) + .setContourMode(FirebaseVisionFaceDetectorOptions.ALL_CONTOURS) + //.setClassificationMode(FirebaseVisionFaceDetectorOptions.ALL_CLASSIFICATIONS) + .build(); + + detector = FirebaseVision.getInstance().getVisionFaceDetector(options); + //mPersonRecog = pr; + } + + @Override + public void stop() { + try { + detector.close(); + } catch (IOException e) { + Log.e(TAG, "Exception thrown while trying to close Face Contour Detector: " + e); + } + } + + @Override + protected Task> detectInImage(FirebaseVisionImage image) { + return detector.detectInImage(image); + } + + @Override + protected void onSuccess( + @Nullable Bitmap originalCameraImage, + @NonNull List faces, + @NonNull FrameMetadata frameMetadata, + @NonNull GraphicOverlay graphicOverlay) { + this.faces = faces; + //Mat m = new Mat(); + graphicOverlay.clear(); + if (originalCameraImage != null) { + CameraImageGraphic imageGraphic = new CameraImageGraphic(graphicOverlay, originalCameraImage); + graphicOverlay.add(imageGraphic); + //Utils.bitmapToMat(originalCameraImage, m); + } + for (int i = 0; i < faces.size(); ++i) { + FirebaseVisionFace face = faces.get(i); + /*Rect bbox = face.getBoundingBox(); + Mat subm = new Mat(m, new org.opencv.core.Rect(bbox.left, bbox.top, 
bbox.width(), bbox.height())); + String label = mPersonRecog.predict(subm); + */ + FaceContourGraphic faceGraphic = new FaceContourGraphic(graphicOverlay, face); + graphicOverlay.add(faceGraphic); + } + graphicOverlay.postInvalidate(); + //m.release(); + } + + @Override + protected void onFailure(@NonNull Exception e) { + Log.e(TAG, "Face detection failed " + e); + } + +} \ No newline at end of file diff --git a/app/src/main/java/com/themon/test/mlkit_faciallandmarks/facedetection/FaceContourGraphic.java b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/facedetection/FaceContourGraphic.java new file mode 100644 index 0000000..e3dbbcd --- /dev/null +++ b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/facedetection/FaceContourGraphic.java @@ -0,0 +1,144 @@ +package com.themon.test.mlkit_faciallandmarks.facedetection; + +import android.graphics.Canvas; +import android.graphics.Color; +import android.graphics.Paint; + +import com.google.firebase.ml.vision.face.FirebaseVisionFace; +import com.google.firebase.ml.vision.face.FirebaseVisionFaceContour; +import com.google.firebase.ml.vision.face.FirebaseVisionFaceLandmark; +import com.themon.test.mlkit_faciallandmarks.common.GraphicOverlay; +import com.themon.test.mlkit_faciallandmarks.common.GraphicOverlay.Graphic; + +/** Graphic instance for rendering face contours graphic overlay view. 
*/ +public class FaceContourGraphic extends Graphic { + + private static final float FACE_POSITION_RADIUS = 4.0f; + private static final float ID_TEXT_SIZE = 30.0f; + private static final float ID_Y_OFFSET = 80.0f; + private static final float ID_X_OFFSET = -70.0f; + private static final float BOX_STROKE_WIDTH = 5.0f; + private static final float THRESHOLD = 0.10f; + + private final Paint facePositionPaint; + private final Paint idPaint; + private final Paint boxPaint; + + private volatile FirebaseVisionFace firebaseVisionFace; + + public FaceContourGraphic(GraphicOverlay overlay, FirebaseVisionFace face) { + super(overlay); + + this.firebaseVisionFace = face; + final int selectedColor = Color.WHITE; + + facePositionPaint = new Paint(); + facePositionPaint.setColor(selectedColor); + + idPaint = new Paint(); + idPaint.setColor(selectedColor); + idPaint.setTextSize(ID_TEXT_SIZE); + + boxPaint = new Paint(); + boxPaint.setColor(selectedColor); + boxPaint.setStyle(Paint.Style.STROKE); + boxPaint.setStrokeWidth(BOX_STROKE_WIDTH); + } + + /** Draws the face annotations for position on the supplied canvas. */ + @Override + public void draw(Canvas canvas) { + FirebaseVisionFace face = firebaseVisionFace; + if (face == null) { + return; + } + + // Draws a circle at the position of the detected face, with the face's track id below. + float x = translateX(face.getBoundingBox().centerX()); + float y = translateY(face.getBoundingBox().centerY()); + canvas.drawCircle(x, y, FACE_POSITION_RADIUS, facePositionPaint); + canvas.drawText("id: " + face.getTrackingId(), x + ID_X_OFFSET, y + ID_Y_OFFSET, idPaint); + + // Draws a bounding box around the face. 
+ float xOffset = scaleX(face.getBoundingBox().width() / 2.0f); + float yOffset = scaleY(face.getBoundingBox().height() / 2.0f); + float left = x - xOffset; + float top = y - yOffset; + float right = x + xOffset; + float bottom = y + yOffset; + canvas.drawRect(left, top, right, bottom, boxPaint); + + FirebaseVisionFaceContour contour = face.getContour(FirebaseVisionFaceContour.ALL_POINTS); + for (com.google.firebase.ml.vision.common.FirebaseVisionPoint point : contour.getPoints()) { + float px = translateX(point.getX()); + float py = translateY(point.getY()); + canvas.drawCircle(px, py, FACE_POSITION_RADIUS, facePositionPaint); + } + + String text = ""; + if (face.getSmilingProbability() >= 0) { + canvas.drawText( + "happiness: " + String.format("%.2f", face.getSmilingProbability()), + x + ID_X_OFFSET * 3, + y - ID_Y_OFFSET, + idPaint); + } + + if (face.getRightEyeOpenProbability() >= 0) { + canvas.drawText( + "left eye: " + String.format("%.2f", face.getRightEyeOpenProbability()), + x + ID_X_OFFSET * 6, + y, + idPaint); + if (face.getRightEyeOpenProbability() <= THRESHOLD) { + text += "Left eye blinked. "; + } + } + if (face.getLeftEyeOpenProbability() >= 0) { + canvas.drawText( + "right eye: " + String.format("%.2f", face.getLeftEyeOpenProbability()), + x - ID_X_OFFSET, + y, + idPaint); + if (face.getLeftEyeOpenProbability() <= THRESHOLD) { + text += "Right eye blinked. 
"; + } + } + canvas.drawText(text, 40f, 30f, idPaint); + + FirebaseVisionFaceLandmark leftEye = face.getLandmark(FirebaseVisionFaceLandmark.LEFT_EYE); + if (leftEye != null && leftEye.getPosition() != null) { + canvas.drawCircle( + translateX(leftEye.getPosition().getX()), + translateY(leftEye.getPosition().getY()), + FACE_POSITION_RADIUS, + facePositionPaint); + } + FirebaseVisionFaceLandmark rightEye = face.getLandmark(FirebaseVisionFaceLandmark.RIGHT_EYE); + if (rightEye != null && rightEye.getPosition() != null) { + canvas.drawCircle( + translateX(rightEye.getPosition().getX()), + translateY(rightEye.getPosition().getY()), + FACE_POSITION_RADIUS, + facePositionPaint); + } + + FirebaseVisionFaceLandmark leftCheek = face.getLandmark(FirebaseVisionFaceLandmark.LEFT_CHEEK); + if (leftCheek != null && leftCheek.getPosition() != null) { + canvas.drawCircle( + translateX(leftCheek.getPosition().getX()), + translateY(leftCheek.getPosition().getY()), + FACE_POSITION_RADIUS, + facePositionPaint); + } + FirebaseVisionFaceLandmark rightCheek = + face.getLandmark(FirebaseVisionFaceLandmark.RIGHT_CHEEK); + if (rightCheek != null && rightCheek.getPosition() != null) { + canvas.drawCircle( + translateX(rightCheek.getPosition().getX()), + translateY(rightCheek.getPosition().getY()), + FACE_POSITION_RADIUS, + facePositionPaint); + } + } +} diff --git a/app/src/main/java/com/themon/test/mlkit_faciallandmarks/facedetection/FaceDetectionProcessor.java b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/facedetection/FaceDetectionProcessor.java new file mode 100644 index 0000000..0cbb348 --- /dev/null +++ b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/facedetection/FaceDetectionProcessor.java @@ -0,0 +1,95 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package com.themon.test.mlkit_faciallandmarks.facedetection; + +import android.graphics.Bitmap; +import android.hardware.Camera; +import android.support.annotation.NonNull; +import android.support.annotation.Nullable; +import android.util.Log; + +import com.google.android.gms.tasks.Task; +import com.google.firebase.ml.vision.FirebaseVision; +import com.google.firebase.ml.vision.common.FirebaseVisionImage; +import com.google.firebase.ml.vision.face.FirebaseVisionFace; +import com.google.firebase.ml.vision.face.FirebaseVisionFaceDetector; +import com.google.firebase.ml.vision.face.FirebaseVisionFaceDetectorOptions; +import com.themon.test.mlkit_faciallandmarks.common.CameraImageGraphic; +import com.themon.test.mlkit_faciallandmarks.common.FrameMetadata; +import com.themon.test.mlkit_faciallandmarks.common.GraphicOverlay; +import com.themon.test.mlkit_faciallandmarks.VisionProcessorBase; + +import java.io.IOException; +import java.util.List; + +/** + * Face Detector Demo. 
+ */ +public class FaceDetectionProcessor extends VisionProcessorBase> { + + private static final String TAG = "FaceDetectionProcessor"; + + private final FirebaseVisionFaceDetector detector; + + public FaceDetectionProcessor() { + FirebaseVisionFaceDetectorOptions options = + new FirebaseVisionFaceDetectorOptions.Builder() + .setClassificationMode(FirebaseVisionFaceDetectorOptions.ALL_CLASSIFICATIONS) + .build(); + + detector = FirebaseVision.getInstance().getVisionFaceDetector(options); + } + + @Override + public void stop() { + try { + detector.close(); + } catch (IOException e) { + Log.e(TAG, "Exception thrown while trying to close Face Detector: " + e); + } + } + + @Override + protected Task> detectInImage(FirebaseVisionImage image) { + return detector.detectInImage(image); + } + + @Override + protected void onSuccess( + @Nullable Bitmap originalCameraImage, + @NonNull List faces, + @NonNull FrameMetadata frameMetadata, + @NonNull GraphicOverlay graphicOverlay) { + graphicOverlay.clear(); + if (originalCameraImage != null) { + CameraImageGraphic imageGraphic = new CameraImageGraphic(graphicOverlay, originalCameraImage); + graphicOverlay.add(imageGraphic); + } + for (int i = 0; i < faces.size(); ++i) { + FirebaseVisionFace face = faces.get(i); + + int cameraFacing = + frameMetadata != null ? 
frameMetadata.getCameraFacing() : + Camera.CameraInfo.CAMERA_FACING_BACK; + FaceGraphic faceGraphic = new FaceGraphic(graphicOverlay, face, cameraFacing); + graphicOverlay.add(faceGraphic); + } + graphicOverlay.postInvalidate(); + } + + @Override + protected void onFailure(@NonNull Exception e) { + Log.e(TAG, "Face detection failed " + e); + } +} diff --git a/app/src/main/java/com/themon/test/mlkit_faciallandmarks/facedetection/FaceGraphic.java b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/facedetection/FaceGraphic.java new file mode 100644 index 0000000..ba56042 --- /dev/null +++ b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/facedetection/FaceGraphic.java @@ -0,0 +1,143 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.themon.test.mlkit_faciallandmarks.facedetection; + +import android.graphics.Canvas; +import android.graphics.Color; +import android.graphics.Paint; + +import com.google.android.gms.vision.CameraSource; +import com.google.firebase.ml.vision.common.FirebaseVisionPoint; +import com.google.firebase.ml.vision.face.FirebaseVisionFace; +import com.google.firebase.ml.vision.face.FirebaseVisionFaceLandmark; +import com.themon.test.mlkit_faciallandmarks.common.GraphicOverlay; +import com.themon.test.mlkit_faciallandmarks.common.GraphicOverlay.Graphic; + +/** + * Graphic instance for rendering face position, orientation, and landmarks within an associated + * graphic overlay view. 
+ */ +public class FaceGraphic extends Graphic { + private static final float FACE_POSITION_RADIUS = 4.0f; + private static final float ID_TEXT_SIZE = 30.0f; + private static final float ID_Y_OFFSET = 50.0f; + private static final float ID_X_OFFSET = -50.0f; + private static final float BOX_STROKE_WIDTH = 5.0f; + + private int facing; + + private final Paint facePositionPaint; + private final Paint idPaint; + private final Paint boxPaint; + + private volatile FirebaseVisionFace firebaseVisionFace; + + public FaceGraphic(GraphicOverlay overlay, FirebaseVisionFace face, int facing) { + super(overlay); + + firebaseVisionFace = face; + this.facing = facing; + final int selectedColor = Color.WHITE; + + facePositionPaint = new Paint(); + facePositionPaint.setColor(selectedColor); + + idPaint = new Paint(); + idPaint.setColor(selectedColor); + idPaint.setTextSize(ID_TEXT_SIZE); + + boxPaint = new Paint(); + boxPaint.setColor(selectedColor); + boxPaint.setStyle(Paint.Style.STROKE); + boxPaint.setStrokeWidth(BOX_STROKE_WIDTH); + } + + /** + * Draws the face annotations for position on the supplied canvas. + */ + @Override + public void draw(Canvas canvas) { + FirebaseVisionFace face = firebaseVisionFace; + if (face == null) { + return; + } + + // Draws a circle at the position of the detected face, with the face's track id below. 
+ float x = translateX(face.getBoundingBox().centerX()); + float y = translateY(face.getBoundingBox().centerY()); + canvas.drawCircle(x, y, FACE_POSITION_RADIUS, facePositionPaint); + canvas.drawText("id: " + face.getTrackingId(), x + ID_X_OFFSET, y + ID_Y_OFFSET, idPaint); + canvas.drawText( + "happiness: " + String.format("%.2f", face.getSmilingProbability()), + x + ID_X_OFFSET * 3, + y - ID_Y_OFFSET, + idPaint); + if (facing == CameraSource.CAMERA_FACING_FRONT) { + canvas.drawText( + "right eye: " + String.format("%.2f", face.getRightEyeOpenProbability()), + x - ID_X_OFFSET, + y, + idPaint); + canvas.drawText( + "left eye: " + String.format("%.2f", face.getLeftEyeOpenProbability()), + x + ID_X_OFFSET * 6, + y, + idPaint); + } else { + canvas.drawText( + "left eye: " + String.format("%.2f", face.getLeftEyeOpenProbability()), + x - ID_X_OFFSET, + y, + idPaint); + canvas.drawText( + "right eye: " + String.format("%.2f", face.getRightEyeOpenProbability()), + x + ID_X_OFFSET * 6, + y, + idPaint); + } + + // Draws a bounding box around the face. 
+ float xOffset = scaleX(face.getBoundingBox().width() / 2.0f); + float yOffset = scaleY(face.getBoundingBox().height() / 2.0f); + float left = x - xOffset; + float top = y - yOffset; + float right = x + xOffset; + float bottom = y + yOffset; + canvas.drawRect(left, top, right, bottom, boxPaint); + + // draw landmarks + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.MOUTH_BOTTOM); + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.LEFT_CHEEK); + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.LEFT_EAR); + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.MOUTH_LEFT); + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.LEFT_EYE); + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.NOSE_BASE); + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.RIGHT_CHEEK); + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.RIGHT_EAR); + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.RIGHT_EYE); + drawLandmarkPosition(canvas, face, FirebaseVisionFaceLandmark.MOUTH_RIGHT); + } + + private void drawLandmarkPosition(Canvas canvas, FirebaseVisionFace face, int landmarkID) { + FirebaseVisionFaceLandmark landmark = face.getLandmark(landmarkID); + if (landmark != null) { + FirebaseVisionPoint point = landmark.getPosition(); + canvas.drawCircle( + translateX(point.getX()), + translateY(point.getY()), + 10f, idPaint); + } + } +} diff --git a/app/src/main/java/com/themon/test/mlkit_faciallandmarks/recognizeAsync.java b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/recognizeAsync.java new file mode 100644 index 0000000..2c48446 --- /dev/null +++ b/app/src/main/java/com/themon/test/mlkit_faciallandmarks/recognizeAsync.java @@ -0,0 +1,4 @@ +package com.themon.test.mlkit_faciallandmarks; + +interface recognizeAsync { +} diff --git a/app/src/main/java/com/tzutalin/dlib/AddPerson.java b/app/src/main/java/com/tzutalin/dlib/AddPerson.java new file mode 100644 index 
0000000..fc0e476 --- /dev/null +++ b/app/src/main/java/com/tzutalin/dlib/AddPerson.java @@ -0,0 +1,248 @@ +/** + * Created by Gaurav on Feb 23, 2018 + */ + +package com.tzutalin.dlib; + +import android.Manifest; +import android.app.ProgressDialog; +import android.content.DialogInterface; +import android.content.Intent; +import android.content.pm.PackageManager; +import android.database.Cursor; +import android.graphics.Bitmap; +import android.net.Uri; +import android.os.AsyncTask; +import android.os.Bundle; +import android.provider.MediaStore; +import android.support.v7.app.AlertDialog; +import android.support.v7.app.AppCompatActivity; +import android.text.Editable; +import android.text.TextWatcher; +import android.view.View; +import android.widget.Button; +import android.widget.EditText; +import android.widget.Toast; + +import com.themon.test.mlkit_faciallandmarks.MainActivity; +import com.themon.test.mlkit_faciallandmarks.R; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.util.List; + +// Copy the person image renamed to his name into the dlib image directory +public class AddPerson extends AppCompatActivity { + + EditText et_name, et_image; + Button btn_select_image, btn_add; + int BITMAP_QUALITY = 100; + int MAX_IMAGE_SIZE = 500; + String TAG = "AddPerson"; + private Bitmap bitmap; + private File destination = null; + private String imgPath = null; + private final int PICK_IMAGE_CAMERA = 1, PICK_IMAGE_GALLERY = 2; + + @Override + protected void onCreate(Bundle savedInstanceState) { + super.onCreate(savedInstanceState); + setContentView(R.layout.activity_add_person); + + btn_select_image = (Button)findViewById(R.id.btn_select_image); + btn_add = (Button)findViewById(R.id.btn_add); + et_name = (EditText)findViewById(R.id.et_name); + et_image = (EditText)findViewById(R.id.et_image); + + 
btn_select_image.setOnClickListener(mOnClickListener); + btn_add.setOnClickListener(mOnClickListener); + btn_add.setEnabled(false); + + et_name.addTextChangedListener(new TextWatcher() { + @Override + public void afterTextChanged(Editable arg0) { + imgPath = null; + et_image.setText(""); + enableSubmitIfReady(); + } + + @Override + public void beforeTextChanged(CharSequence s, int start, int count, int after) { + } + + @Override + public void onTextChanged(CharSequence s, int start, int before, int count) { + } + }); + + destination = new File(Constants.getDLibDirectoryPath() + "/temp.jpg"); + } + + public void enableSubmitIfReady() { + boolean isReady = et_name.getText().toString().length() > 0 && imgPath!=null; + btn_add.setEnabled(isReady); + } + + private View.OnClickListener mOnClickListener = new View.OnClickListener() { + @Override + public void onClick(View v) { + switch (v.getId()) { + case R.id.btn_select_image: + selectImage(); + break; + case R.id.btn_add: + String targetPath = Constants.getDLibImageDirectoryPath() + "/" + et_name.getText().toString() + ".jpg"; + FileUtils.copyFile(imgPath,targetPath); + Intent i = new Intent(AddPerson.this,MainActivity.class); + startActivity(i); + finish(); + break; + } + } + }; + + // Select image from camera and gallery + private void selectImage() { + try { + PackageManager pm = getPackageManager(); + int hasPerm = pm.checkPermission(Manifest.permission.CAMERA, getPackageName()); + if (hasPerm == PackageManager.PERMISSION_GRANTED) { + final CharSequence[] options = {"Take Photo", "Choose From Gallery","Cancel"}; + android.support.v7.app.AlertDialog.Builder builder = new android.support.v7.app.AlertDialog.Builder(AddPerson.this); + builder.setTitle("Select Option"); + builder.setItems(options, new DialogInterface.OnClickListener() { + @Override + public void onClick(DialogInterface dialog, int item) { + if (options[item].equals("Take Photo")) { + dialog.dismiss(); + Intent intent = new 
Intent(MediaStore.ACTION_IMAGE_CAPTURE); + startActivityForResult(intent, PICK_IMAGE_CAMERA); + } else if (options[item].equals("Choose From Gallery")) { + dialog.dismiss(); + Intent pickPhoto = new Intent(Intent.ACTION_PICK, MediaStore.Images.Media.EXTERNAL_CONTENT_URI); + startActivityForResult(pickPhoto, PICK_IMAGE_GALLERY); + } else if (options[item].equals("Cancel")) { + dialog.dismiss(); + } + } + }); + builder.show(); + } else + Toast.makeText(this, "Camera Permission error", Toast.LENGTH_SHORT).show(); + } catch (Exception e) { + Toast.makeText(this, "Camera Permission error", Toast.LENGTH_SHORT).show(); + e.printStackTrace(); + } + } + + @Override + public void onActivityResult(int requestCode, int resultCode, Intent data) { + super.onActivityResult(requestCode, resultCode, data); + if (requestCode == PICK_IMAGE_CAMERA) { + try { + Uri selectedImage = data.getData(); + bitmap = (Bitmap) data.getExtras().get("data"); + Bitmap scaledBitmap = scaleDown(bitmap, MAX_IMAGE_SIZE, true); + et_image.setText(destination.getAbsolutePath()); + new detectAsync().execute(scaledBitmap); + + } catch (Exception e) { + e.printStackTrace(); + } + } else if (requestCode == PICK_IMAGE_GALLERY) { + Uri selectedImage = data.getData(); + try { + bitmap = MediaStore.Images.Media.getBitmap(this.getContentResolver(), selectedImage); + Bitmap scaledBitmap = scaleDown(bitmap, MAX_IMAGE_SIZE, true); + et_image.setText(getRealPathFromURI(selectedImage)); + new detectAsync().execute(scaledBitmap); + + } catch (Exception e) { + e.printStackTrace(); + } + } + } + + public String getRealPathFromURI(Uri contentUri) { + String[] proj = {MediaStore.Audio.Media.DATA}; + Cursor cursor = managedQuery(contentUri, proj, null, null, null); + int column_index = cursor.getColumnIndexOrThrow(MediaStore.Audio.Media.DATA); + cursor.moveToFirst(); + return cursor.getString(column_index); + } + + public static Bitmap scaleDown(Bitmap realImage, float maxImageSize, boolean filter) { + float ratio = 
Math.min( + (float) maxImageSize / realImage.getWidth(), + (float) maxImageSize / realImage.getHeight()); + int width = Math.round((float) ratio * realImage.getWidth()); + int height = Math.round((float) ratio * realImage.getHeight()); + + Bitmap newBitmap = Bitmap.createScaledBitmap(realImage, width, + height, filter); + return newBitmap; + } + + private FaceRec mFaceRec; + + private class detectAsync extends AsyncTask { + ProgressDialog dialog = new ProgressDialog(AddPerson.this); + + @Override + protected void onPreExecute() { + dialog.setMessage("Detecting face..."); + dialog.setCancelable(false); + dialog.show(); + super.onPreExecute(); + } + + protected String doInBackground(Bitmap... bp) { + mFaceRec = new FaceRec(Constants.getDLibDirectoryPath()); + List results; + results = mFaceRec.detect(bp[0]); + String msg = null; + if (results.size()==0) { + msg = "No face was detected or face was too small. Please select a different image"; + } else if (results.size() > 1) { + msg = "More than one face was detected. 
Please select a different image"; + } else { + ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + bp[0].compress(Bitmap.CompressFormat.JPEG, BITMAP_QUALITY, bytes); + FileOutputStream fo; + try { + destination.createNewFile(); + fo = new FileOutputStream(destination); + fo.write(bytes.toByteArray()); + fo.close(); + } catch (FileNotFoundException e) { + e.printStackTrace(); + } catch (IOException e) { + e.printStackTrace(); + } + imgPath = destination.getAbsolutePath(); + } + return msg; + } + + protected void onPostExecute(String result) { + if(dialog != null && dialog.isShowing()){ + dialog.dismiss(); + if (result!=null) { + AlertDialog.Builder builder1 = new AlertDialog.Builder(AddPerson.this); + builder1.setMessage(result); + builder1.setCancelable(true); + AlertDialog alert11 = builder1.create(); + alert11.show(); + imgPath = null; + et_image.setText(""); + } + enableSubmitIfReady(); + } + + } + } + +} diff --git a/app/src/main/java/com/tzutalin/dlib/Constants.java b/app/src/main/java/com/tzutalin/dlib/Constants.java new file mode 100644 index 0000000..82118ca --- /dev/null +++ b/app/src/main/java/com/tzutalin/dlib/Constants.java @@ -0,0 +1,37 @@ +package com.tzutalin.dlib; + +import android.os.Environment; + +import java.io.File; + +/** + * Created by darrenl on 2016/4/22. 
+ * Modified by Gaurav on Feb 23, 2018 + */ + +public final class Constants { + private Constants() { + // Constants should be prive + } + + public static String getDLibDirectoryPath() { + File sdcard = Environment.getExternalStorageDirectory(); + String targetPath = sdcard.getAbsolutePath() + File.separator + "dlib_rec_example"; + return targetPath; + } + + public static String getDLibImageDirectoryPath() { + String targetPath = getDLibDirectoryPath()+ File.separator + "images"; + return targetPath; + } + + public static String getFaceShapeModelPath() { + String targetPath = getDLibDirectoryPath() + File.separator + "shape_predictor_5_face_landmarks.dat"; + return targetPath; + } + + public static String getFaceDescriptorModelPath() { + String targetPath = getDLibDirectoryPath() + File.separator + "dlib_face_recognition_resnet_model_v1.dat"; + return targetPath; + } +} diff --git a/app/src/main/java/com/tzutalin/dlib/FaceRec.java b/app/src/main/java/com/tzutalin/dlib/FaceRec.java new file mode 100644 index 0000000..1422b13 --- /dev/null +++ b/app/src/main/java/com/tzutalin/dlib/FaceRec.java @@ -0,0 +1,90 @@ +package com.tzutalin.dlib; + +import android.graphics.Bitmap; +import android.support.annotation.Keep; +import android.support.annotation.NonNull; +import android.support.annotation.Nullable; +import android.support.annotation.WorkerThread; +import android.util.Log; + +import java.util.Arrays; +import java.util.List; + +/** + * Created by houzhi on 16-10-20. 
+ * Modified by tzutalin on 16-11-15 + * Modified by Gaurav on Feb 23, 2018 + */ +public class FaceRec { + private static final String TAG = "dlib"; + + // accessed by native methods + @SuppressWarnings("unused") + private long mNativeFaceRecContext; + private String dir_path = ""; + + static { + try { + System.loadLibrary("android_dlib"); + jniNativeClassInit(); + Log.d(TAG, "jniNativeClassInit success"); + } catch (UnsatisfiedLinkError e) { + Log.e(TAG, "library not found"); + } + } + + public FaceRec(String sample_dir_path) { + dir_path = sample_dir_path; + Log.e(TAG, "Calling JNI init"); + jniInit(dir_path); + } + + @Nullable + @WorkerThread + public void train() { + jniTrain(); + return; + } + + @Nullable + @WorkerThread + public List recognize(@NonNull Bitmap bitmap) { + VisionDetRet[] detRets = jniBitmapRec(bitmap); + return Arrays.asList(detRets); + } + + @Nullable + @WorkerThread + public List detect(@NonNull Bitmap bitmap) { + VisionDetRet[] detRets = jniBitmapDetect(bitmap); + return Arrays.asList(detRets); + } + + @Override + protected void finalize() throws Throwable { + super.finalize(); + release(); + } + + public void release() { + jniDeInit(); + } + + @Keep + private native static void jniNativeClassInit(); + + @Keep + private synchronized native int jniInit(String sample_dir_path); + + @Keep + private synchronized native int jniDeInit(); + + @Keep + private synchronized native int jniTrain(); + + @Keep + private synchronized native VisionDetRet[] jniBitmapDetect(Bitmap bitmap); + + @Keep + private synchronized native VisionDetRet[] jniBitmapRec(Bitmap bitmap); +} diff --git a/app/src/main/java/com/tzutalin/dlib/FileUtils.java b/app/src/main/java/com/tzutalin/dlib/FileUtils.java new file mode 100644 index 0000000..ae8f165 --- /dev/null +++ b/app/src/main/java/com/tzutalin/dlib/FileUtils.java @@ -0,0 +1,87 @@ +/* + * Copyright 2016 Tzutalin + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except 
in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.tzutalin.dlib; + +import android.content.Context; +import android.support.annotation.NonNull; +import android.support.annotation.RawRes; + +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +/** + * Created by darrenl on 2016/3/30. + * Modified by Gaurav on Feb 23, 2018 + */ +public class FileUtils { + @NonNull + public static final void copyFileFromRawToOthers(@NonNull final Context context, @RawRes int id, @NonNull final String targetPath) { + InputStream in = context.getResources().openRawResource(id); + FileOutputStream out = null; + try { + out = new FileOutputStream(targetPath); + byte[] buff = new byte[1024]; + int read = 0; + while ((read = in.read(buff)) > 0) { + out.write(buff, 0, read); + } + } catch (Exception e) { + e.printStackTrace(); + } finally { + try { + if (in != null) { + in.close(); + } + if (out != null) { + out.close(); + } + } catch (IOException e) { + e.printStackTrace(); + } + } + } + + public static void copyFile(String srcPath, String targetPath) { + InputStream in = null; + OutputStream out = null; + try { + in = new FileInputStream(srcPath); + out = new FileOutputStream(targetPath); + byte[] buf = new byte[1024]; + int len; + while ((len = in.read(buf)) > 0) { + out.write(buf, 0, len); + } + } catch (Exception e) { + e.printStackTrace(); + } finally { + try { + if (in != null) { + in.close(); + } + if (out != null) { + out.close(); + } + } catch (IOException e) { 
+ e.printStackTrace(); + } + } + } +} diff --git a/app/src/main/java/com/tzutalin/dlib/VisionDetRet.java b/app/src/main/java/com/tzutalin/dlib/VisionDetRet.java new file mode 100644 index 0000000..02ef03c --- /dev/null +++ b/app/src/main/java/com/tzutalin/dlib/VisionDetRet.java @@ -0,0 +1,134 @@ +/* +* Copyright (C) 2015 TzuTaLin +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package com.tzutalin.dlib; + +/** + * Created by Tzutalin on 2015/10/20. + */ + +import android.graphics.Point; + +import java.util.ArrayList; + +/** + * A VisionDetRet contains all the information identifying the location and confidence value of the detected object in a bitmap. + */ +public final class VisionDetRet { + private String mLabel; + private float mConfidence; + private int mLeft; + private int mTop; + private int mRight; + private int mBottom; + private ArrayList mLandmarkPoints = new ArrayList<>(); + + VisionDetRet() { + } + + /** + * @param label Label name + * @param confidence A confidence factor between 0 and 1. This indicates how certain what has been found is actually the label. 
+ * @param l The X coordinate of the left side of the result + * @param t The Y coordinate of the top of the result + * @param r The X coordinate of the right side of the result + * @param b The Y coordinate of the bottom of the result + */ + public VisionDetRet(String label, float confidence, int l, int t, int r, int b) { + mLabel = label; + mLeft = l; + mTop = t; + mRight = r; + mBottom = b; + mConfidence = confidence; + } + + /** + * @return The X coordinate of the left side of the result + */ + public int getLeft() { + return mLeft; + } + + /** + * @return The Y coordinate of the top of the result + */ + public int getTop() { + return mTop; + } + + /** + * @return The X coordinate of the right side of the result + */ + public int getRight() { + return mRight; + } + + /** + * @return The Y coordinate of the bottom of the result + */ + public int getBottom() { + return mBottom; + } + + /** + * @return A confidence factor between 0 and 1. This indicates how certain what has been found is actually the label. + */ + public float getConfidence() { + return mConfidence; + } + + /** + * @return The label of the result + */ + public String getLabel() { + return mLabel; + } + + /** + * Add landmark to the list. 
Usually, call by jni + * @param x Point x + * @param y Point y + * @return true if adding landmark successfully + */ + public boolean addLandmark(int x, int y) { + return mLandmarkPoints.add(new Point(x, y)); + } + + /** + * Return the list of landmark points + * @return ArrayList of android.graphics.Point + */ + public ArrayList getFaceLandmarks() { + return mLandmarkPoints; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("Left:"); + sb.append(mLabel); + sb.append(", Top:"); + sb.append(mTop); + sb.append(", Right:"); + sb.append(mRight); + sb.append(", Bottom:"); + sb.append(mBottom); + sb.append(", Label:"); + sb.append(mLabel); + return sb.toString(); + } +} diff --git a/app/src/main/jniLibs/arm64-v8a/libandroid_dlib.so b/app/src/main/jniLibs/arm64-v8a/libandroid_dlib.so new file mode 100644 index 0000000..4bad10f Binary files /dev/null and b/app/src/main/jniLibs/arm64-v8a/libandroid_dlib.so differ diff --git a/app/src/main/jniLibs/armeabi-v7a/libandroid_dlib.so b/app/src/main/jniLibs/armeabi-v7a/libandroid_dlib.so new file mode 100644 index 0000000..c3fc4b3 Binary files /dev/null and b/app/src/main/jniLibs/armeabi-v7a/libandroid_dlib.so differ diff --git a/app/src/main/jniLibs/x86/libandroid_dlib.so b/app/src/main/jniLibs/x86/libandroid_dlib.so new file mode 100644 index 0000000..e688576 Binary files /dev/null and b/app/src/main/jniLibs/x86/libandroid_dlib.so differ diff --git a/app/src/main/jniLibs/x86_64/libandroid_dlib.so b/app/src/main/jniLibs/x86_64/libandroid_dlib.so new file mode 100644 index 0000000..d1f5504 Binary files /dev/null and b/app/src/main/jniLibs/x86_64/libandroid_dlib.so differ diff --git a/app/src/main/res/drawable-hdpi/ic_action_info.png b/app/src/main/res/drawable-hdpi/ic_action_info.png new file mode 100644 index 0000000..32bd1aa Binary files /dev/null and b/app/src/main/res/drawable-hdpi/ic_action_info.png differ diff --git a/app/src/main/res/drawable-hdpi/ic_launcher.png 
b/app/src/main/res/drawable-hdpi/ic_launcher.png new file mode 100644 index 0000000..ac6cf27 Binary files /dev/null and b/app/src/main/res/drawable-hdpi/ic_launcher.png differ diff --git a/app/src/main/res/drawable-hdpi/ic_switch_camera_white_48dp.xml b/app/src/main/res/drawable-hdpi/ic_switch_camera_white_48dp.xml new file mode 100644 index 0000000..63266d6 --- /dev/null +++ b/app/src/main/res/drawable-hdpi/ic_switch_camera_white_48dp.xml @@ -0,0 +1,9 @@ + + + diff --git a/app/src/main/res/drawable-hdpi/ic_switch_camera_white_48dp_inset.png b/app/src/main/res/drawable-hdpi/ic_switch_camera_white_48dp_inset.png new file mode 100644 index 0000000..a621627 Binary files /dev/null and b/app/src/main/res/drawable-hdpi/ic_switch_camera_white_48dp_inset.png differ diff --git a/app/src/main/res/drawable-hdpi/tile.9.png b/app/src/main/res/drawable-hdpi/tile.9.png new file mode 100644 index 0000000..1358628 Binary files /dev/null and b/app/src/main/res/drawable-hdpi/tile.9.png differ diff --git a/app/src/main/res/drawable-mdpi/ic_action_info.png b/app/src/main/res/drawable-mdpi/ic_action_info.png new file mode 100644 index 0000000..8efbbf8 Binary files /dev/null and b/app/src/main/res/drawable-mdpi/ic_action_info.png differ diff --git a/app/src/main/res/drawable-mdpi/ic_launcher.png b/app/src/main/res/drawable-mdpi/ic_launcher.png new file mode 100644 index 0000000..65f92a5 Binary files /dev/null and b/app/src/main/res/drawable-mdpi/ic_launcher.png differ diff --git a/app/src/main/res/drawable-mdpi/ic_switch_camera_white_48dp.xml b/app/src/main/res/drawable-mdpi/ic_switch_camera_white_48dp.xml new file mode 100644 index 0000000..38c8412 --- /dev/null +++ b/app/src/main/res/drawable-mdpi/ic_switch_camera_white_48dp.xml @@ -0,0 +1,9 @@ + + + diff --git a/app/src/main/res/drawable-mdpi/ic_switch_camera_white_48dp_inset.png b/app/src/main/res/drawable-mdpi/ic_switch_camera_white_48dp_inset.png new file mode 100644 index 0000000..74b7917 Binary files /dev/null and 
b/app/src/main/res/drawable-mdpi/ic_switch_camera_white_48dp_inset.png differ diff --git a/app/src/main/res/drawable-v24/ic_launcher_foreground.xml b/app/src/main/res/drawable-v24/ic_launcher_foreground.xml new file mode 100644 index 0000000..c7bd21d --- /dev/null +++ b/app/src/main/res/drawable-v24/ic_launcher_foreground.xml @@ -0,0 +1,34 @@ + + + + + + + + + + + diff --git a/app/src/main/res/drawable-xhdpi/ic_action_info.png b/app/src/main/res/drawable-xhdpi/ic_action_info.png new file mode 100644 index 0000000..ba143ea Binary files /dev/null and b/app/src/main/res/drawable-xhdpi/ic_action_info.png differ diff --git a/app/src/main/res/drawable-xhdpi/ic_launcher.png b/app/src/main/res/drawable-xhdpi/ic_launcher.png new file mode 100644 index 0000000..6fd1318 Binary files /dev/null and b/app/src/main/res/drawable-xhdpi/ic_launcher.png differ diff --git a/app/src/main/res/drawable-xhdpi/ic_switch_camera_white_48dp.xml b/app/src/main/res/drawable-xhdpi/ic_switch_camera_white_48dp.xml new file mode 100644 index 0000000..fb06b0c --- /dev/null +++ b/app/src/main/res/drawable-xhdpi/ic_switch_camera_white_48dp.xml @@ -0,0 +1,9 @@ + + + diff --git a/app/src/main/res/drawable-xhdpi/ic_switch_camera_white_48dp_inset.png b/app/src/main/res/drawable-xhdpi/ic_switch_camera_white_48dp_inset.png new file mode 100644 index 0000000..8d7cb37 Binary files /dev/null and b/app/src/main/res/drawable-xhdpi/ic_switch_camera_white_48dp_inset.png differ diff --git a/app/src/main/res/drawable-xxhdpi/ic_action_info.png b/app/src/main/res/drawable-xxhdpi/ic_action_info.png new file mode 100644 index 0000000..394eb7e Binary files /dev/null and b/app/src/main/res/drawable-xxhdpi/ic_action_info.png differ diff --git a/app/src/main/res/drawable-xxhdpi/ic_launcher.png b/app/src/main/res/drawable-xxhdpi/ic_launcher.png new file mode 100644 index 0000000..4513cf2 Binary files /dev/null and b/app/src/main/res/drawable-xxhdpi/ic_launcher.png differ diff --git 
a/app/src/main/res/drawable-xxhdpi/ic_switch_camera_white_48dp.xml b/app/src/main/res/drawable-xxhdpi/ic_switch_camera_white_48dp.xml new file mode 100644 index 0000000..a814bfe --- /dev/null +++ b/app/src/main/res/drawable-xxhdpi/ic_switch_camera_white_48dp.xml @@ -0,0 +1,9 @@ + + + diff --git a/app/src/main/res/drawable-xxhdpi/ic_switch_camera_white_48dp_inset.png b/app/src/main/res/drawable-xxhdpi/ic_switch_camera_white_48dp_inset.png new file mode 100644 index 0000000..74b9f0a Binary files /dev/null and b/app/src/main/res/drawable-xxhdpi/ic_switch_camera_white_48dp_inset.png differ diff --git a/app/src/main/res/drawable-xxxhdpi/ic_switch_camera_white_48dp.xml b/app/src/main/res/drawable-xxxhdpi/ic_switch_camera_white_48dp.xml new file mode 100644 index 0000000..e3c887f --- /dev/null +++ b/app/src/main/res/drawable-xxxhdpi/ic_switch_camera_white_48dp.xml @@ -0,0 +1,9 @@ + + + diff --git a/app/src/main/res/drawable-xxxhdpi/ic_switch_camera_white_48dp_inset.png b/app/src/main/res/drawable-xxxhdpi/ic_switch_camera_white_48dp_inset.png new file mode 100644 index 0000000..f8ffc75 Binary files /dev/null and b/app/src/main/res/drawable-xxxhdpi/ic_switch_camera_white_48dp_inset.png differ diff --git a/app/src/main/res/drawable/ic_launcher_background.xml b/app/src/main/res/drawable/ic_launcher_background.xml new file mode 100644 index 0000000..d5fccc5 --- /dev/null +++ b/app/src/main/res/drawable/ic_launcher_background.xml @@ -0,0 +1,170 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/app/src/main/res/layout/activity_add_person.xml b/app/src/main/res/layout/activity_add_person.xml new file mode 100644 index 0000000..f271ee2 --- /dev/null +++ b/app/src/main/res/layout/activity_add_person.xml @@ -0,0 +1,84 @@ + + + + + + + + + + + + + + + + + +