From 5ffcffa8c7d24e6a89b759e10f9971f62bbba6dc Mon Sep 17 00:00:00 2001 From: panmari Date: Sat, 11 Jan 2025 21:05:18 +0100 Subject: [PATCH] Add support for YUV_420_888 Image format. Casting the bytes to a type directly is not possible, thus allocating a new texture is necessary (and costly). But on the bright side, we avoid the conversion inside the camera plugin [0]. Or any other custom conversion code in dart, which is likely more costly. [0] https://github.com/flutter/packages/blob/d1fd6232ec33cd5a25aa762e605c494afced812f/packages/camera/camera_android/android/src/main/java/io/flutter/plugins/camera/media/ImageStreamReaderUtils.java#L35 --- .../vision_detector_views/camera_view.dart | 47 ++++++++++++----- .../BarcodeScanner.java | 20 ++++++-- .../InputImageConverter.java | 50 +++++++++++++++++-- .../FaceDetector.java | 19 ++++--- .../FaceMeshDetector.java | 41 +++++++++------ .../ImageLabelDetector.java | 16 ++++-- .../ObjectDetector.java | 14 ++++-- .../PoseDetector.java | 7 ++- .../SelfieSegmenter.java | 7 ++- .../SubjectSegmenter.java | 9 +++- .../TextRecognizer.java | 7 ++- 11 files changed, 178 insertions(+), 59 deletions(-) diff --git a/packages/example/lib/vision_detector_views/camera_view.dart b/packages/example/lib/vision_detector_views/camera_view.dart index d3a87f8c..df6436c3 100644 --- a/packages/example/lib/vision_detector_views/camera_view.dart +++ b/packages/example/lib/vision_detector_views/camera_view.dart @@ -358,29 +358,52 @@ class _CameraViewState extends State { // get image format final format = InputImageFormatValue.fromRawValue(image.format.raw); - // validate format depending on platform - // only supported formats: - // * nv21 for Android - // * bgra8888 for iOS - if (format == null || - (Platform.isAndroid && format != InputImageFormat.nv21) || + if (format == null) { + print('could not find format from raw value: ${image.format.raw}'); + return null; + } + // Validate format depending on platform + final androidSupportedFormats = [ + 
InputImageFormat.nv21, + InputImageFormat.yv12, + InputImageFormat.yuv_420_888 + ]; + if ((Platform.isAndroid && !androidSupportedFormats.contains(format)) || (Platform.isIOS && format != InputImageFormat.bgra8888)) { + print('image format is not supported: $format'); return null; } - // since format is constraint to nv21 or bgra8888, both only have one plane - if (image.planes.length != 1) return null; - final plane = image.planes.first; + // Compile a flat list of all image data. For image formats with multiple planes, + // takes some copying. + final Uint8List bytes = image.planes.length == 1 + ? image.planes.first.bytes + : _concatenatePlanes(image); // compose InputImage using bytes return InputImage.fromBytes( - bytes: plane.bytes, + bytes: bytes, metadata: InputImageMetadata( size: Size(image.width.toDouble(), image.height.toDouble()), rotation: rotation, // used only in Android - format: format, // used only in iOS - bytesPerRow: plane.bytesPerRow, // used only in iOS + format: format, + bytesPerRow: image.planes.first.bytesPerRow, // used only in iOS ), ); } + + Uint8List _concatenatePlanes(CameraImage image) { + int length = 0; + for (final Plane p in image.planes) { + length += p.bytes.length; + } + + final Uint8List bytes = Uint8List(length); + int offset = 0; + for (final Plane p in image.planes) { + bytes.setRange(offset, offset + p.bytes.length, p.bytes); + offset += p.bytes.length; + } + return bytes; + } } diff --git a/packages/google_mlkit_barcode_scanning/android/src/main/java/com/google_mlkit_barcode_scanning/BarcodeScanner.java b/packages/google_mlkit_barcode_scanning/android/src/main/java/com/google_mlkit_barcode_scanning/BarcodeScanner.java index 42fda756..738ef81f 100644 --- a/packages/google_mlkit_barcode_scanning/android/src/main/java/com/google_mlkit_barcode_scanning/BarcodeScanner.java +++ b/packages/google_mlkit_barcode_scanning/android/src/main/java/com/google_mlkit_barcode_scanning/BarcodeScanner.java @@ -23,6 +23,7 @@ import 
io.flutter.plugin.common.MethodChannel; public class BarcodeScanner implements MethodChannel.MethodCallHandler { + private static final String START = "vision#startBarcodeScanner"; private static final String CLOSE = "vision#closeBarcodeScanner"; @@ -67,8 +68,11 @@ private com.google.mlkit.vision.barcode.BarcodeScanner initialize(MethodCall cal private void handleDetection(MethodCall call, final MethodChannel.Result result) { Map imageData = call.argument("imageData"); - InputImage inputImage = InputImageConverter.getInputImageFromData(imageData, context, result); - if (inputImage == null) return; + InputImageConverter converter = new InputImageConverter(); + InputImage inputImage = converter.getInputImageFromData(imageData, context, result); + if (inputImage == null) { + return; + } String id = call.argument("id"); com.google.mlkit.vision.barcode.BarcodeScanner barcodeScanner = instances.get(id); @@ -191,7 +195,9 @@ private void handleDetection(MethodCall call, final MethodChannel.Result result) } result.success(barcodeList); }) - .addOnFailureListener(e -> result.error("BarcodeDetectorError", e.toString(), null)); + .addOnFailureListener(e -> result.error("BarcodeDetectorError", e.toString(), e)) + // Closing is necessary for both success and failure. 
+ .addOnCompleteListener(r -> converter.close()); } private void addPoints(Point[] cornerPoints, List> points) { @@ -205,7 +211,9 @@ private void addPoints(Point[] cornerPoints, List> points) private Map getBoundingPoints(@Nullable Rect rect) { Map frame = new HashMap<>(); - if (rect == null) return frame; + if (rect == null) { + return frame; + } frame.put("left", rect.left); frame.put("right", rect.right); frame.put("top", rect.top); @@ -216,7 +224,9 @@ private Map getBoundingPoints(@Nullable Rect rect) { private void closeDetector(MethodCall call) { String id = call.argument("id"); com.google.mlkit.vision.barcode.BarcodeScanner barcodeScanner = instances.get(id); - if (barcodeScanner == null) return; + if (barcodeScanner == null) { + return; + } barcodeScanner.close(); instances.remove(id); } diff --git a/packages/google_mlkit_commons/android/src/main/java/com/google_mlkit_commons/InputImageConverter.java b/packages/google_mlkit_commons/android/src/main/java/com/google_mlkit_commons/InputImageConverter.java index 7f11d35b..08fc347b 100644 --- a/packages/google_mlkit_commons/android/src/main/java/com/google_mlkit_commons/InputImageConverter.java +++ b/packages/google_mlkit_commons/android/src/main/java/com/google_mlkit_commons/InputImageConverter.java @@ -2,22 +2,30 @@ import android.content.Context; import android.graphics.ImageFormat; +import android.graphics.SurfaceTexture; +import android.media.Image; +import android.media.ImageWriter; import android.net.Uri; import android.util.Log; +import android.view.Surface; import com.google.mlkit.vision.common.InputImage; import java.io.File; import java.io.IOException; +import java.lang.AutoCloseable; +import java.nio.ByteBuffer; import java.util.Map; import java.util.Objects; import io.flutter.plugin.common.MethodChannel; -public class InputImageConverter { +public class InputImageConverter implements AutoCloseable { + + ImageWriter writer; //Returns an [InputImage] from the image data received - public static 
InputImage getInputImageFromData(Map imageData, + public InputImage getInputImageFromData(Map imageData, Context context, MethodChannel.Result result) { //Differentiates whether the image data is a path for a image file, contains image data in form of bytes, or a bitmap @@ -116,9 +124,36 @@ public static InputImage getInputImageFromData(Map imageData, rotationDegrees, imageFormat); } + if (imageFormat == ImageFormat.YUV_420_888) { + // This image format is only supported in InputImage.fromMediaImage, which requires to transform the data to the right java type. + // TODO: Consider reusing the same Surface across multiple calls to save on allocations. + writer = new ImageWriter.Builder(new Surface(new SurfaceTexture(true))) + .setWidthAndHeight(width, height) + .setImageFormat(imageFormat) + .build(); + Image image = writer.dequeueInputImage(); + if (image == null) { + result.error("InputImageConverterError", "failed to allocate space for input image", null); + return null; + } + // Deconstruct individual planes again from flattened array. + Image.Plane[] planes = image.getPlanes(); + // Y plane + ByteBuffer yBuffer = planes[0].getBuffer(); + yBuffer.put(data, 0, width * height); + + // U plane + ByteBuffer uBuffer = planes[1].getBuffer(); + int uOffset = width * height; + uBuffer.put(data, uOffset, (width * height) / 4); + + // V plane + ByteBuffer vBuffer = planes[2].getBuffer(); + int vOffset = uOffset + (width * height) / 4; + vBuffer.put(data, vOffset, (width * height) / 4); + return InputImage.fromMediaImage(image, rotationDegrees); + } result.error("InputImageConverterError", "ImageFormat is not supported.", null); - // TODO: Use InputImage.fromMediaImage, which supports more types, e.g. IMAGE_FORMAT_YUV_420_888. 
- // See https://developers.google.com/android/reference/com/google/mlkit/vision/common/InputImage#fromMediaImage(android.media.Image,%20int) return null; } catch (Exception e) { Log.e("ImageError", "Getting Image failed"); @@ -133,4 +168,11 @@ public static InputImage getInputImageFromData(Map imageData, } } + @Override + public void close() { + if (writer != null) { + writer.close(); + } + } + } diff --git a/packages/google_mlkit_face_detection/android/src/main/java/com/google_mlkit_face_detection/FaceDetector.java b/packages/google_mlkit_face_detection/android/src/main/java/com/google_mlkit_face_detection/FaceDetector.java index f4f17041..d135460d 100644 --- a/packages/google_mlkit_face_detection/android/src/main/java/com/google_mlkit_face_detection/FaceDetector.java +++ b/packages/google_mlkit_face_detection/android/src/main/java/com/google_mlkit_face_detection/FaceDetector.java @@ -23,6 +23,7 @@ import io.flutter.plugin.common.MethodChannel; class FaceDetector implements MethodChannel.MethodCallHandler { + private static final String START = "vision#startFaceDetector"; private static final String CLOSE = "vision#closeFaceDetector"; @@ -52,9 +53,11 @@ public void onMethodCall(@NonNull MethodCall call, @NonNull MethodChannel.Result private void handleDetection(MethodCall call, final MethodChannel.Result result) { Map imageData = (Map) call.argument("imageData"); - InputImage inputImage = InputImageConverter.getInputImageFromData(imageData, context, result); - if (inputImage == null) + InputImageConverter converter = new InputImageConverter(); + InputImage inputImage = converter.getInputImageFromData(imageData, context, result); + if (inputImage == null) { return; + } String id = call.argument("id"); com.google.mlkit.vision.face.FaceDetector detector = instances.get(id); @@ -115,7 +118,10 @@ private void handleDetection(MethodCall call, final MethodChannel.Result result) result.success(faces); }) .addOnFailureListener( - e -> result.error("FaceDetectorError", 
e.toString(), null)); + e -> result.error("FaceDetectorError", e.toString(), null)) + // Closing is necessary for both success and failure. + .addOnCompleteListener(r -> converter.close()); + } private FaceDetectorOptions parseOptions(Map options) { @@ -206,7 +212,7 @@ private Map> getContourData(Face face) { private double[] landmarkPosition(Face face, int landmarkInt) { FaceLandmark landmark = face.getLandmark(landmarkInt); if (landmark != null) { - return new double[] { landmark.getPosition().x, landmark.getPosition().y }; + return new double[]{landmark.getPosition().x, landmark.getPosition().y}; } return null; } @@ -217,7 +223,7 @@ private List contourPosition(Face face, int contourInt) { List contourPoints = contour.getPoints(); List result = new ArrayList<>(); for (int i = 0; i < contourPoints.size(); i++) { - result.add(new double[] { contourPoints.get(i).x, contourPoints.get(i).y }); + result.add(new double[]{contourPoints.get(i).x, contourPoints.get(i).y}); } return result; } @@ -227,8 +233,9 @@ private List contourPosition(Face face, int contourInt) { private void closeDetector(MethodCall call) { String id = call.argument("id"); com.google.mlkit.vision.face.FaceDetector detector = instances.get(id); - if (detector == null) + if (detector == null) { return; + } detector.close(); instances.remove(id); } diff --git a/packages/google_mlkit_face_mesh_detection/android/src/main/java/com/google_mlkit_face_mesh_detection/FaceMeshDetector.java b/packages/google_mlkit_face_mesh_detection/android/src/main/java/com/google_mlkit_face_mesh_detection/FaceMeshDetector.java index 0d39560f..7a6bdbce 100644 --- a/packages/google_mlkit_face_mesh_detection/android/src/main/java/com/google_mlkit_face_mesh_detection/FaceMeshDetector.java +++ b/packages/google_mlkit_face_mesh_detection/android/src/main/java/com/google_mlkit_face_mesh_detection/FaceMeshDetector.java @@ -22,6 +22,7 @@ import io.flutter.plugin.common.MethodChannel; class FaceMeshDetector implements 
MethodChannel.MethodCallHandler { + private static final String START = "vision#startFaceMeshDetector"; private static final String CLOSE = "vision#closeFaceMeshDetector"; @@ -51,8 +52,11 @@ public void onMethodCall(@NonNull MethodCall call, @NonNull MethodChannel.Result private void handleDetection(MethodCall call, final MethodChannel.Result result) { Map imageData = (Map) call.argument("imageData"); - InputImage inputImage = InputImageConverter.getInputImageFromData(imageData, context, result); - if (inputImage == null) return; + InputImageConverter converter = new InputImageConverter(); + InputImage inputImage = converter.getInputImageFromData(imageData, context, result); + if (inputImage == null) { + return; + } String id = call.argument("id"); com.google.mlkit.vision.facemesh.FaceMeshDetector detector = instances.get(id); @@ -104,18 +108,18 @@ private void handleDetection(MethodCall call, final MethodChannel.Result result) meshData.put("triangles", triangles); int[] types = { - FaceMesh.FACE_OVAL, - FaceMesh.LEFT_EYEBROW_TOP, - FaceMesh.LEFT_EYEBROW_BOTTOM, - FaceMesh.RIGHT_EYEBROW_TOP, - FaceMesh.RIGHT_EYEBROW_BOTTOM, - FaceMesh.LEFT_EYE, - FaceMesh.RIGHT_EYE, - FaceMesh.UPPER_LIP_TOP, - FaceMesh.UPPER_LIP_BOTTOM, - FaceMesh.LOWER_LIP_TOP, - FaceMesh.LOWER_LIP_BOTTOM, - FaceMesh.NOSE_BRIDGE + FaceMesh.FACE_OVAL, + FaceMesh.LEFT_EYEBROW_TOP, + FaceMesh.LEFT_EYEBROW_BOTTOM, + FaceMesh.RIGHT_EYEBROW_TOP, + FaceMesh.RIGHT_EYEBROW_BOTTOM, + FaceMesh.LEFT_EYE, + FaceMesh.RIGHT_EYE, + FaceMesh.UPPER_LIP_TOP, + FaceMesh.UPPER_LIP_BOTTOM, + FaceMesh.LOWER_LIP_TOP, + FaceMesh.LOWER_LIP_BOTTOM, + FaceMesh.NOSE_BRIDGE }; Map>> contours = new HashMap<>(); for (int type : types) { @@ -129,7 +133,10 @@ private void handleDetection(MethodCall call, final MethodChannel.Result result) result.success(faceMeshes); }) .addOnFailureListener( - e -> result.error("FaceMeshDetectorError", e.toString(), null)); + e -> result.error("FaceMeshDetectorError", e.toString(), null)) + // 
Closing is necessary for both success and failure. + .addOnCompleteListener(r -> converter.close()); + } private List> pointsToList(List points) { @@ -152,7 +159,9 @@ private Map pointToMap(FaceMeshPoint point) { private void closeDetector(MethodCall call) { String id = call.argument("id"); com.google.mlkit.vision.facemesh.FaceMeshDetector detector = instances.get(id); - if (detector == null) return; + if (detector == null) { + return; + } detector.close(); instances.remove(id); } diff --git a/packages/google_mlkit_image_labeling/android/src/main/java/com/google_mlkit_image_labeling/ImageLabelDetector.java b/packages/google_mlkit_image_labeling/android/src/main/java/com/google_mlkit_image_labeling/ImageLabelDetector.java index 16a4729d..98a1ced4 100644 --- a/packages/google_mlkit_image_labeling/android/src/main/java/com/google_mlkit_image_labeling/ImageLabelDetector.java +++ b/packages/google_mlkit_image_labeling/android/src/main/java/com/google_mlkit_image_labeling/ImageLabelDetector.java @@ -25,6 +25,7 @@ import io.flutter.plugin.common.MethodChannel; public class ImageLabelDetector implements MethodChannel.MethodCallHandler { + private static final String START = "vision#startImageLabelDetector"; private static final String CLOSE = "vision#closeImageLabelDetector"; private static final String MANAGE = "vision#manageFirebaseModels"; @@ -59,8 +60,11 @@ public void onMethodCall(@NonNull MethodCall call, @NonNull MethodChannel.Result private void handleDetection(MethodCall call, final MethodChannel.Result result) { Map imageData = call.argument("imageData"); - InputImage inputImage = InputImageConverter.getInputImageFromData(imageData, context, result); - if (inputImage == null) return; + InputImageConverter converter = new InputImageConverter(); + InputImage inputImage = converter.getInputImageFromData(imageData, context, result); + if (inputImage == null) { + return; + } String id = call.argument("id"); ImageLabeler imageLabeler = instances.get(id); @@ -106,7 
+110,9 @@ private void handleDetection(MethodCall call, final MethodChannel.Result result) result.success(labels); }) - .addOnFailureListener(e -> result.error("ImageLabelDetectorError", e.toString(), null)); + .addOnFailureListener(e -> result.error("ImageLabelDetectorError", e.toString(), e)) + // Closing is necessary for both success and failure. + .addOnCompleteListener(r -> converter.close()); } //Labeler options that are provided to default image labeler(uses inbuilt model). @@ -152,7 +158,9 @@ private CustomImageLabelerOptions getRemoteOptions(Map labelerOp private void closeDetector(MethodCall call) { String id = call.argument("id"); ImageLabeler imageLabeler = instances.get(id); - if (imageLabeler == null) return; + if (imageLabeler == null) { + return; + } imageLabeler.close(); instances.remove(id); } diff --git a/packages/google_mlkit_object_detection/android/src/main/java/com/google_mlkit_object_detection/ObjectDetector.java b/packages/google_mlkit_object_detection/android/src/main/java/com/google_mlkit_object_detection/ObjectDetector.java index f15cc4ab..45b1a69d 100644 --- a/packages/google_mlkit_object_detection/android/src/main/java/com/google_mlkit_object_detection/ObjectDetector.java +++ b/packages/google_mlkit_object_detection/android/src/main/java/com/google_mlkit_object_detection/ObjectDetector.java @@ -25,6 +25,7 @@ import io.flutter.plugin.common.MethodChannel; public class ObjectDetector implements MethodChannel.MethodCallHandler { + private static final String START = "vision#startObjectDetector"; private static final String CLOSE = "vision#closeObjectDetector"; private static final String MANAGE = "vision#manageFirebaseModels"; @@ -59,8 +60,11 @@ public void onMethodCall(@NonNull MethodCall call, @NonNull MethodChannel.Result private void handleDetection(MethodCall call, final MethodChannel.Result result) { Map imageData = (Map) call.argument("imageData"); - InputImage inputImage = InputImageConverter.getInputImageFromData(imageData, 
context, result); - if (inputImage == null) return; + InputImageConverter converter = new InputImageConverter(); + InputImage inputImage = converter.getInputImageFromData(imageData, context, result); + if (inputImage == null) { + return; + } String id = call.argument("id"); com.google.mlkit.vision.objects.ObjectDetector objectDetector = instances.get(id); @@ -106,8 +110,10 @@ private void handleDetection(MethodCall call, final MethodChannel.Result result) result.success(objects); }).addOnFailureListener(e -> { e.printStackTrace(); - result.error("ObjectDetectionError", e.toString(), null); - }); + result.error("ObjectDetectionError", e.toString(), e); + }) + // Closing is necessary for both success and failure. + .addOnCompleteListener(r -> converter.close()); } private ObjectDetectorOptions getDefaultOptions(Map options) { diff --git a/packages/google_mlkit_pose_detection/android/src/main/java/com/google_mlkit_pose_detection/PoseDetector.java b/packages/google_mlkit_pose_detection/android/src/main/java/com/google_mlkit_pose_detection/PoseDetector.java index 63e63f7f..a8f7e604 100644 --- a/packages/google_mlkit_pose_detection/android/src/main/java/com/google_mlkit_pose_detection/PoseDetector.java +++ b/packages/google_mlkit_pose_detection/android/src/main/java/com/google_mlkit_pose_detection/PoseDetector.java @@ -49,7 +49,8 @@ public void onMethodCall(@NonNull MethodCall call, @NonNull MethodChannel.Result private void handleDetection(MethodCall call, final MethodChannel.Result result) { Map imageData = (Map) call.argument("imageData"); - InputImage inputImage = InputImageConverter.getInputImageFromData(imageData, context, result); + InputImageConverter converter = new InputImageConverter(); + InputImage inputImage = converter.getInputImageFromData(imageData, context, result); if (inputImage == null) return; String id = call.argument("id"); @@ -102,7 +103,9 @@ private void handleDetection(MethodCall call, final MethodChannel.Result result) result.success(array); }) 
.addOnFailureListener( - e -> result.error("PoseDetectorError", e.toString(), null)); + e -> result.error("PoseDetectorError", e.toString(), e)) + // Closing is necessary for both success and failure. + .addOnCompleteListener(r -> converter.close()); } private void closeDetector(MethodCall call) { diff --git a/packages/google_mlkit_selfie_segmentation/android/src/main/java/com/google_mlkit_selfie_segmentation/SelfieSegmenter.java b/packages/google_mlkit_selfie_segmentation/android/src/main/java/com/google_mlkit_selfie_segmentation/SelfieSegmenter.java index 478a4100..a1f33f7a 100644 --- a/packages/google_mlkit_selfie_segmentation/android/src/main/java/com/google_mlkit_selfie_segmentation/SelfieSegmenter.java +++ b/packages/google_mlkit_selfie_segmentation/android/src/main/java/com/google_mlkit_selfie_segmentation/SelfieSegmenter.java @@ -65,7 +65,8 @@ private Segmenter initialize(MethodCall call) { private void handleDetection(MethodCall call, final MethodChannel.Result result) { Map imageData = (Map) call.argument("imageData"); - InputImage inputImage = InputImageConverter.getInputImageFromData(imageData, context, result); + InputImageConverter converter = new InputImageConverter(); + InputImage inputImage = converter.getInputImageFromData(imageData, context, result); if (inputImage == null) return; String id = call.argument("id"); @@ -102,7 +103,9 @@ private void handleDetection(MethodCall call, final MethodChannel.Result result) result.success(map); }) .addOnFailureListener( - e -> result.error("Selfie segmentation failed!", e.getMessage(), e)); + e -> result.error("Selfie segmentation failed!", e.getMessage(), e)) + // Closing is necessary for both success and failure. 
+ .addOnCompleteListener(r -> converter.close()); } private void closeDetector(MethodCall call) { diff --git a/packages/google_mlkit_subject_segmentation/android/src/main/java/com/google_mlkit_subject_segmentation/SubjectSegmenter.java b/packages/google_mlkit_subject_segmentation/android/src/main/java/com/google_mlkit_subject_segmentation/SubjectSegmenter.java index f6ed9ed7..5005eb2f 100644 --- a/packages/google_mlkit_subject_segmentation/android/src/main/java/com/google_mlkit_subject_segmentation/SubjectSegmenter.java +++ b/packages/google_mlkit_subject_segmentation/android/src/main/java/com/google_mlkit_subject_segmentation/SubjectSegmenter.java @@ -54,12 +54,17 @@ public void onMethodCall(@NonNull MethodCall call, @NonNull MethodChannel.Result private void handleDetection(MethodCall call, MethodChannel.Result result) { Map imageData = (Map) call.argument("imageData"); - InputImage inputImage = InputImageConverter.getInputImageFromData(imageData, context, result); + InputImageConverter converter = new InputImageConverter(); + InputImage inputImage = converter.getInputImageFromData(imageData, context, result); if (inputImage == null) return; String id = call.argument("id"); com.google.mlkit.vision.segmentation.subject.SubjectSegmenter subjectSegmenter = getOrCreateSegmenter(id, call); - subjectSegmenter.process(inputImage).addOnSuccessListener(subjectSegmentationResult -> processResult(subjectSegmentationResult, result)).addOnFailureListener(e -> result.error("Subject segmentation failure!", e.getMessage(), e)); + subjectSegmenter.process(inputImage) + .addOnSuccessListener(subjectSegmentationResult -> processResult(subjectSegmentationResult, result)) + .addOnFailureListener(e -> result.error("Subject segmentation failure!", e.getMessage(), e)) + // Closing is necessary for both success and failure. 
+ .addOnCompleteListener(r -> converter.close()); } private com.google.mlkit.vision.segmentation.subject.SubjectSegmenter getOrCreateSegmenter(String id, MethodCall call) { diff --git a/packages/google_mlkit_text_recognition/android/src/main/java/com/google_mlkit_text_recognition/TextRecognizer.java b/packages/google_mlkit_text_recognition/android/src/main/java/com/google_mlkit_text_recognition/TextRecognizer.java index c7b981bc..207873c8 100644 --- a/packages/google_mlkit_text_recognition/android/src/main/java/com/google_mlkit_text_recognition/TextRecognizer.java +++ b/packages/google_mlkit_text_recognition/android/src/main/java/com/google_mlkit_text_recognition/TextRecognizer.java @@ -78,7 +78,8 @@ private void handleDetection(MethodCall call, final MethodChannel.Result result) if (imageData == null) { return; } - InputImage inputImage = InputImageConverter.getInputImageFromData(imageData, context, result); + InputImageConverter converter = new InputImageConverter(); + InputImage inputImage = converter.getInputImageFromData(imageData, context, result); if (inputImage == null) return; String id = call.argument("id"); @@ -159,7 +160,9 @@ private void handleDetection(MethodCall call, final MethodChannel.Result result) textResult.put("blocks", textBlocks); result.success(textResult); }) - .addOnFailureListener(e -> result.error("TextRecognizerError", e.toString(), null)); + .addOnFailureListener(e -> result.error("TextRecognizerError", e.toString(), e)) + // Closing is necessary for both success and failure. + .addOnCompleteListener(r -> converter.close()); } private void addData(Map addTo,