@@ -8,20 +8,12 @@ that performs hair segmentation with TensorFlow Lite on GPU.

## Android

- Please see [Hello World! in MediaPipe on Android](hello_world_android.md) for
- general instructions to develop an Android application that uses MediaPipe.
+ [Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu)

- The graph below is used in the
- [Hair Segmentation GPU Android example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu).
- To build the app, run:
+ To build and install the app:

```bash
bazel build -c opt --config=android_arm64 mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu
- ```
-
- To further install the app on an Android device, run:
-
- ```bash
adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu/hairsegmentationgpu.apk
```

@@ -37,7 +29,7 @@ below and paste it into [MediaPipe Visualizer](https://viz.mediapipe.dev/).
```bash
# MediaPipe graph that performs hair segmentation with TensorFlow Lite on GPU.
# Used in the example in
- # mediapipie/examples/ios/hairsegmentationgpu.
+ # mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu.

# Images on GPU coming into and out of the graph.
input_stream: "input_video"
@@ -84,14 +76,11 @@ node: {
}
}

- # Waits for a mask from the previous round of hair segmentation to be fed back
- # as an input, and caches it. Upon the arrival of an input image, it checks if
- # there is a mask cached, and sends out the mask with the timestamp replaced by
- # that of the input image. This is needed so that the "current image" and the
- # "previous mask" share the same timestamp, and as a result can be synchronized
- # and combined in the subsequent calculator. Note that upon the arrival of the
- # very first input frame, an empty packet is sent out to jump start the feedback
- # loop.
+ # Caches a mask fed back from the previous round of hair segmentation, and upon
+ # the arrival of the next input image sends out the cached mask with the
+ # timestamp replaced by that of the input image, essentially generating a packet
+ # that carries the previous mask. Note that upon the arrival of the very first
+ # input image, an empty packet is sent out to jump start the feedback loop.
node {
calculator: "PreviousLoopbackCalculator"
input_stream: "MAIN:throttled_input_video"
@@ -114,9 +103,9 @@ node {

# Converts the transformed input image on GPU into an image tensor stored in
# tflite::gpu::GlBuffer. The zero_center option is set to false to normalize the
- # pixel values to [0.f, 1.f] as opposed to [-1.f, 1.f].
- # With the max_num_channels option set to 4, all 4 RGBA channels are contained
- # in the image tensor.
+ # pixel values to [0.f, 1.f] as opposed to [-1.f, 1.f]. With the
+ # max_num_channels option set to 4, all 4 RGBA channels are contained in the
+ # image tensor.
node {
calculator: "TfLiteConverterCalculator"
input_stream: "IMAGE_GPU:mask_embedded_input_video"
@@ -147,7 +136,7 @@ node {
node {
calculator: "TfLiteInferenceCalculator"
input_stream: "TENSORS_GPU:image_tensor"
- output_stream: "TENSORS:segmentation_tensor"
+ output_stream: "TENSORS_GPU:segmentation_tensor"
input_side_packet: "CUSTOM_OP_RESOLVER:op_resolver"
node_options: {
[type.googleapis.com/mediapipe.TfLiteInferenceCalculatorOptions] {
@@ -157,23 +146,15 @@ node {
}
}

- # The next step (tensors to segmentation) is not yet supported on iOS GPU.
- # Convert the previous segmentation mask to CPU for processing.
- node: {
- calculator: "GpuBufferToImageFrameCalculator"
- input_stream: "previous_hair_mask"
- output_stream: "previous_hair_mask_cpu"
- }
-
# Decodes the segmentation tensor generated by the TensorFlow Lite model into a
- # mask of values in [0.f, 1.f], stored in the R channel of a CPU buffer. It also
+ # mask of values in [0.f, 1.f], stored in the R channel of a GPU buffer. It also
# takes the mask generated previously as another input to improve the temporal
# consistency.
node {
calculator: "TfLiteTensorsToSegmentationCalculator"
- input_stream: "TENSORS:segmentation_tensor"
- input_stream: "PREV_MASK:previous_hair_mask_cpu"
- output_stream: "MASK:hair_mask_cpu"
+ input_stream: "TENSORS_GPU:segmentation_tensor"
+ input_stream: "PREV_MASK_GPU:previous_hair_mask"
+ output_stream: "MASK_GPU:hair_mask"
node_options: {
[type.googleapis.com/mediapipe.TfLiteTensorsToSegmentationCalculatorOptions] {
tensor_width: 512
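
To make the temporal-consistency idea concrete: one simple way to combine the freshly decoded mask with the previous frame's mask is an exponential moving average. This is a deliberately simplified sketch; the calculator's actual combination logic is not shown here:

```python
from typing import Optional

import numpy as np

def smooth_mask(new_mask: np.ndarray,
                prev_mask: Optional[np.ndarray],
                alpha: float = 0.7) -> np.ndarray:
    """Blends the decoded mask (values in [0, 1]) with the previous
    frame's mask to reduce frame-to-frame flicker."""
    if prev_mask is None:  # First frame: nothing to smooth against.
        return new_mask
    return alpha * new_mask + (1.0 - alpha) * prev_mask
```
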
@@ -185,13 +166,6 @@ node {
}
}

- # Send the current segmentation mask to GPU for the last step, blending.
- node: {
- calculator: "ImageFrameToGpuBufferCalculator"
- input_stream: "hair_mask_cpu"
- output_stream: "hair_mask"
- }
-
# Colors the hair segmentation with the color specified in the option.
node {
calculator: "RecolorCalculator"