diff --git a/.changeset/clean-ties-whisper.md b/.changeset/clean-ties-whisper.md new file mode 100644 index 00000000..e7732eab --- /dev/null +++ b/.changeset/clean-ties-whisper.md @@ -0,0 +1,5 @@ +--- +"client-sdk-android": patch +--- + +Fix local video tracks not rendering processed frames diff --git a/.changeset/lucky-lamps-sneeze.md b/.changeset/lucky-lamps-sneeze.md new file mode 100644 index 00000000..60460ad1 --- /dev/null +++ b/.changeset/lucky-lamps-sneeze.md @@ -0,0 +1,5 @@ +--- +"client-sdk-android": patch +--- + +Add utility class NoDropVideoProcessor to force video processing while not connected diff --git a/examples/selfie-segmentation/src/main/java/io/livekit/android/selfie/SelfieBitmapVideoProcessor.kt b/examples/selfie-segmentation/src/main/java/io/livekit/android/selfie/SelfieBitmapVideoProcessor.kt index 55b374b5..64191b5e 100644 --- a/examples/selfie-segmentation/src/main/java/io/livekit/android/selfie/SelfieBitmapVideoProcessor.kt +++ b/examples/selfie-segmentation/src/main/java/io/livekit/android/selfie/SelfieBitmapVideoProcessor.kt @@ -31,6 +31,7 @@ import com.google.mlkit.vision.common.InputImage import com.google.mlkit.vision.segmentation.Segmentation import com.google.mlkit.vision.segmentation.Segmenter import com.google.mlkit.vision.segmentation.selfie.SelfieSegmenterOptions +import io.livekit.android.room.track.video.NoDropVideoProcessor import kotlinx.coroutines.CoroutineDispatcher import kotlinx.coroutines.CoroutineScope import kotlinx.coroutines.cancel @@ -41,13 +42,12 @@ import kotlinx.coroutines.sync.Mutex import livekit.org.webrtc.EglBase import livekit.org.webrtc.SurfaceTextureHelper import livekit.org.webrtc.VideoFrame -import livekit.org.webrtc.VideoProcessor import livekit.org.webrtc.VideoSink import livekit.org.webrtc.YuvHelper import java.io.ByteArrayOutputStream import java.nio.ByteBuffer -class SelfieBitmapVideoProcessor(eglBase: EglBase, dispatcher: CoroutineDispatcher) : VideoProcessor { +class 
SelfieBitmapVideoProcessor(eglBase: EglBase, dispatcher: CoroutineDispatcher) : NoDropVideoProcessor() { private var targetSink: VideoSink? = null private val segmenter: Segmenter @@ -138,6 +138,7 @@ class SelfieBitmapVideoProcessor(eglBase: EglBase, dispatch frameBuffer.release() frame.release() + // Ready for segmentation processing. val inputImage = InputImage.fromBitmap(bitmap, 0) val task = segmenter.process(inputImage) @@ -156,6 +157,7 @@ class SelfieBitmapVideoProcessor(eglBase: EglBase, dispatch } } + // Prepare for creating the processed video frame. if (lastRotation != rotationDegrees) { surfaceTextureHelper?.setFrameRotation(rotationDegrees) lastRotation = rotationDegrees @@ -175,6 +177,7 @@ class SelfieBitmapVideoProcessor(eglBase: EglBase, dispatch } if (canvas != null) { + // Create the video frame. canvas.drawBitmap(bitmap, Matrix(), Paint()) surface.unlockCanvasAndPost(canvas) } diff --git a/examples/selfie-segmentation/src/main/java/io/livekit/android/selfie/SelfieVideoProcessor.kt b/examples/selfie-segmentation/src/main/java/io/livekit/android/selfie/SelfieVideoProcessor.kt index f8d66695..3168372e 100644 --- a/examples/selfie-segmentation/src/main/java/io/livekit/android/selfie/SelfieVideoProcessor.kt +++ b/examples/selfie-segmentation/src/main/java/io/livekit/android/selfie/SelfieVideoProcessor.kt @@ -21,17 +21,17 @@ import com.google.mlkit.vision.common.InputImage import com.google.mlkit.vision.segmentation.Segmentation import com.google.mlkit.vision.segmentation.Segmenter import com.google.mlkit.vision.segmentation.selfie.SelfieSegmenterOptions +import io.livekit.android.room.track.video.NoDropVideoProcessor import kotlinx.coroutines.CoroutineDispatcher import kotlinx.coroutines.CoroutineScope import kotlinx.coroutines.channels.BufferOverflow import kotlinx.coroutines.flow.MutableSharedFlow import kotlinx.coroutines.launch import livekit.org.webrtc.VideoFrame -import 
livekit.org.webrtc.VideoProcessor import livekit.org.webrtc.VideoSink import java.nio.ByteBuffer -class SelfieVideoProcessor(dispatcher: CoroutineDispatcher) : VideoProcessor { +class SelfieVideoProcessor(dispatcher: CoroutineDispatcher) : NoDropVideoProcessor() { private var targetSink: VideoSink? = null private val segmenter: Segmenter diff --git a/livekit-android-sdk/src/main/java/io/livekit/android/annotations/WebRTCSensitive.kt b/livekit-android-sdk/src/main/java/io/livekit/android/annotations/WebRTCSensitive.kt new file mode 100644 index 00000000..f000034e --- /dev/null +++ b/livekit-android-sdk/src/main/java/io/livekit/android/annotations/WebRTCSensitive.kt @@ -0,0 +1,25 @@ +/* + * Copyright 2024 LiveKit, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.livekit.android.annotations + +/** + * The target marked with this annotation is sensitive to the internal + * code of WebRTC, and should be directly retested whenever WebRTC version + * is upgraded. 
+ */ +@Retention(AnnotationRetention.SOURCE) +annotation class WebRTCSensitive diff --git a/livekit-android-sdk/src/main/java/io/livekit/android/room/track/LocalVideoTrack.kt b/livekit-android-sdk/src/main/java/io/livekit/android/room/track/LocalVideoTrack.kt index 0d3d1265..8d813750 100644 --- a/livekit-android-sdk/src/main/java/io/livekit/android/room/track/LocalVideoTrack.kt +++ b/livekit-android-sdk/src/main/java/io/livekit/android/room/track/LocalVideoTrack.kt @@ -476,13 +476,20 @@ constructor( source.setVideoProcessor(videoProcessor) val surfaceTextureHelper = SurfaceTextureHelper.create("VideoCaptureThread", rootEglBase.eglBaseContext) - val dispatchObserver = CaptureDispatchObserver() - dispatchObserver.registerObserver(source.capturerObserver) + + // Dispatch raw frames to local renderer only if not using a VideoProcessor. + val dispatchObserver = if (videoProcessor == null) { + CaptureDispatchObserver().apply { + registerObserver(source.capturerObserver) + } + } else { + null + } capturer.initialize( surfaceTextureHelper, context, - dispatchObserver, + dispatchObserver ?: source.capturerObserver, ) val rtcTrack = peerConnectionFactory.createVideoTrack(UUID.randomUUID().toString(), source) diff --git a/livekit-android-sdk/src/main/java/io/livekit/android/room/track/video/NoDropVideoProcessor.kt b/livekit-android-sdk/src/main/java/io/livekit/android/room/track/video/NoDropVideoProcessor.kt new file mode 100644 index 00000000..a514b6af --- /dev/null +++ b/livekit-android-sdk/src/main/java/io/livekit/android/room/track/video/NoDropVideoProcessor.kt @@ -0,0 +1,56 @@ +/* + * Copyright 2024 LiveKit, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.livekit.android.room.track.video + +import io.livekit.android.annotations.WebRTCSensitive +import livekit.org.webrtc.VideoFrame +import livekit.org.webrtc.VideoProcessor + +/** + * When not connected to a room, the base [VideoProcessor] implementation will refuse + * to process frames as they will all be dropped (i.e. not sent). + * + * This implementation by default forces all frames to be processed regardless of publish status. + * + * Change [allowDropping] to true if you want to allow dropping of frames. + */ +abstract class NoDropVideoProcessor : VideoProcessor { + /** + * If set to false, forces all frames to be processed regardless of publish status. + * If set to true, frames will only be processed when the associated video track is published. + * + * By default, set to false. + */ + @Suppress("MemberVisibilityCanBePrivate") + var allowDropping = false + + @WebRTCSensitive + override fun onFrameCaptured(frame: VideoFrame, parameters: VideoProcessor.FrameAdaptationParameters) { + if (allowDropping) { + super.onFrameCaptured(frame, parameters) + } else { + // Altered from VideoProcessor + val adaptedFrame = VideoProcessor.applyFrameAdaptationParameters(frame, parameters) + if (adaptedFrame != null) { + this.onFrameCaptured(adaptedFrame) + adaptedFrame.release() + } else { + this.onFrameCaptured(frame) + } + } + } +}