From 90658fa2bd569362f2b7882e9a77530f1266a1fa Mon Sep 17 00:00:00 2001
From: "vladyslav.yaremenko"
Date: Thu, 11 Jul 2024 16:10:56 +0300
Subject: [PATCH 1/2] Added permissions for microphone/speech

---
 ios/Voice/Voice.m | 974 +++++++++++++++++++---------------------------
 1 file changed, 407 insertions(+), 567 deletions(-)

diff --git a/ios/Voice/Voice.m b/ios/Voice/Voice.m
index 73a2cb4e..a4074210 100644
--- a/ios/Voice/Voice.m
+++ b/ios/Voice/Voice.m
@@ -1,655 +1,495 @@
 #import "Voice.h"
-#import
-#import
 #import
+#import
 #import
+#import
 #import
-#import
+#import
+#import

 @interface Voice ()

-@property(nonatomic) SFSpeechRecognizer *speechRecognizer;
-@property(nonatomic) SFSpeechURLRecognitionRequest *recognitionUrlRequest;
-@property(nonatomic) SFSpeechAudioBufferRecognitionRequest *recognitionRequest;
-@property(nonatomic) AVAudioEngine *audioEngine;
-@property(nonatomic) SFSpeechRecognitionTask *recognitionTask;
-@property(nonatomic) AVAudioSession *audioSession;
+@property (nonatomic) SFSpeechRecognizer* speechRecognizer;
+@property (nonatomic) SFSpeechAudioBufferRecognitionRequest* recognitionRequest;
+@property (nonatomic) AVAudioEngine* audioEngine;
+@property (nonatomic) SFSpeechRecognitionTask* recognitionTask;
+@property (nonatomic) AVAudioSession* audioSession;
 /** Whether speech recognition is finishing.. */
-@property(nonatomic) BOOL isTearingDown;
-@property(nonatomic) BOOL continuous;
+@property (nonatomic) BOOL isTearingDown;
+@property (nonatomic) BOOL continuous;

-@property(nonatomic) NSString *sessionId;
+@property (nonatomic) NSString *sessionId;
 /** Previous category the user was on prior to starting speech recognition */
-@property(nonatomic) NSString *priorAudioCategory;
+@property (nonatomic) NSString* priorAudioCategory;
 /** Volume level Metering*/
 @property float averagePowerForChannel0;
 @property float averagePowerForChannel1;
+/**Timers for silences*/
+@property (nonatomic, strong) NSTimer *silenceTimer;
+@property (nonatomic) NSTimeInterval silenceTimeout;

 @end

-@implementation Voice {
+@implementation Voice
+{
 }

-/** Returns "YES" if no errors had occurred */
-- (BOOL)setupAudioSession {
-  if ([self isHeadsetPluggedIn] || [self isHeadSetBluetooth]) {
-    [self.audioSession setCategory:AVAudioSessionCategoryPlayAndRecord
-                       withOptions:AVAudioSessionCategoryOptionAllowBluetooth
-                             error:nil];
-  } else {
-    [self.audioSession setCategory:AVAudioSessionCategoryPlayAndRecord
-                       withOptions:AVAudioSessionCategoryOptionDefaultToSpeaker
-                             error:nil];
-  }
-
-  NSError *audioSessionError = nil;
-
-  // Activate the audio session
-  [self.audioSession
-      setActive:YES
-    withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation
-          error:&audioSessionError];
-
-  if (audioSessionError != nil) {
-    [self sendResult:@{
-      @"code" : @"audio",
-      @"message" : [audioSessionError localizedDescription]
-    }:nil:nil:nil];
-    return NO;
-  }
+// Initialize silence timeout
+-(instancetype)init {
+    self = [super init];
+    if (self) {
+        _silenceTimeout = 10.0; // 10 seconds timeout
+    }
+    return self;
+}

-  [[NSNotificationCenter defaultCenter]
-      addObserver:self
-         selector:@selector(teardown)
-             name:RCTBridgeWillReloadNotification
-           object:nil];
+/** Returns "YES" if no errors had occurred */
+-(BOOL) setupAudioSession {
+    if ([self isHeadsetPluggedIn] || [self isHeadSetBluetooth]){
+        [self.audioSession setCategory:AVAudioSessionCategoryPlayAndRecord withOptions:AVAudioSessionCategoryOptionAllowBluetooth error: nil];
+    }
+    else {
+        [self.audioSession
setCategory:AVAudioSessionCategoryPlayAndRecord withOptions:AVAudioSessionCategoryOptionDefaultToSpeaker error: nil]; + } + + NSError* audioSessionError = nil; + + // Activate the audio session + [self.audioSession setActive:YES withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation error:&audioSessionError]; + + if (audioSessionError != nil) { + [self sendResult:@{@"code": @"audio", @"message": [audioSessionError localizedDescription]} :nil :nil :nil]; + return NO; + } - return YES; + [[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(teardown) name:RCTBridgeWillReloadNotification object:nil]; + + return YES; } - (BOOL)isHeadsetPluggedIn { - AVAudioSessionRouteDescription *route = - [[AVAudioSession sharedInstance] currentRoute]; - for (AVAudioSessionPortDescription *desc in [route outputs]) { - if ([[desc portType] isEqualToString:AVAudioSessionPortHeadphones] || - [[desc portType] isEqualToString:AVAudioSessionPortBluetoothA2DP]) - return YES; - } - return NO; -} - -- (BOOL)isHeadSetBluetooth { - NSArray *arrayInputs = [[AVAudioSession sharedInstance] availableInputs]; - for (AVAudioSessionPortDescription *port in arrayInputs) { - if ([port.portType isEqualToString:AVAudioSessionPortBluetoothHFP]) { - return YES; + AVAudioSessionRouteDescription* route = [[AVAudioSession sharedInstance] currentRoute]; + for (AVAudioSessionPortDescription* desc in [route outputs]) { + if ([[desc portType] isEqualToString:AVAudioSessionPortHeadphones] || [[desc portType] isEqualToString:AVAudioSessionPortBluetoothA2DP]) + return YES; } - } - return NO; + return NO; } -- (void)teardown { - self.isTearingDown = YES; - [self.recognitionTask cancel]; - self.recognitionTask = nil; - - // Set back audio session category - [self resetAudioSession]; - - // End recognition request - [self.recognitionRequest endAudio]; - - // Remove tap on bus - [self.audioEngine.inputNode removeTapOnBus:0]; - [self.audioEngine.inputNode reset]; - - // Stop audio engine and dereference it for re-allocation - if (self.audioEngine.isRunning) { - [self.audioEngine stop]; - [self.audioEngine reset]; - self.audioEngine = nil; - } - - self.recognitionRequest = nil; - self.recognitionUrlRequest = nil; - self.sessionId = nil; - self.isTearingDown = NO; +-(BOOL)isHeadSetBluetooth { + NSArray *arrayInputs = [[AVAudioSession sharedInstance] availableInputs]; + for (AVAudioSessionPortDescription *port in arrayInputs) + { + if ([port.portType isEqualToString:AVAudioSessionPortBluetoothHFP]) + { + return YES; + } + } + return NO; } -- (void)resetAudioSession { - if (self.audioSession == nil) { - self.audioSession = [AVAudioSession sharedInstance]; - } - // Set audio session to inactive and notify other sessions - // [self.audioSession setActive:NO - // withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation error: - // nil]; - NSString *audioCategory = [self.audioSession category]; - // Category hasn't changed -- do nothing - if ([self.priorAudioCategory isEqualToString:audioCategory]) - return; - // Reset back to the previous category - if ([self isHeadsetPluggedIn] || [self isHeadSetBluetooth]) { - [self.audioSession setCategory:self.priorAudioCategory - withOptions:AVAudioSessionCategoryOptionAllowBluetooth - error:nil]; - } else { - [self.audioSession setCategory:self.priorAudioCategory - withOptions:AVAudioSessionCategoryOptionDefaultToSpeaker - error:nil]; - } - // Remove pointer reference - self.audioSession = nil; +- (void) teardown { + self.isTearingDown = YES; + [self.recognitionTask 
cancel]; + self.recognitionTask = nil; + + // Stop the silence timer + [self stopSilenceTimer]; + + // Set back audio session category + [self resetAudioSession]; + + // End recognition request + [self.recognitionRequest endAudio]; + + // Remove tap on bus + [self.audioEngine.inputNode removeTapOnBus:0]; + [self.audioEngine.inputNode reset]; + + // Stop audio engine and dereference it for re-allocation + if (self.audioEngine.isRunning) { + [self.audioEngine stop]; + [self.audioEngine reset]; + self.audioEngine = nil; + } + + + self.recognitionRequest = nil; + self.sessionId = nil; + self.isTearingDown = NO; } -- (void)setupAndTranscribeFile:(NSString *)filePath - withLocaleStr:(NSString *)localeStr { - - // Tear down resources before starting speech recognition.. - [self teardown]; - - self.sessionId = [[NSUUID UUID] UUIDString]; - - NSLocale *locale = nil; - if ([localeStr length] > 0) { - locale = [NSLocale localeWithLocaleIdentifier:localeStr]; - } - - if (locale) { - self.speechRecognizer = [[SFSpeechRecognizer alloc] initWithLocale:locale]; - } else { - self.speechRecognizer = [[SFSpeechRecognizer alloc] init]; - } - - self.speechRecognizer.delegate = self; - - [self sendEventWithName:@"onTranscriptionError" - body:@{ - @"error" : - @{@"code" : @"fake_error", @"message" : filePath} - }]; - // Set up recognition request - self.recognitionUrlRequest = [[SFSpeechURLRecognitionRequest alloc] - initWithURL:[NSURL fileURLWithPath:filePath]]; - - if (self.recognitionUrlRequest == nil) { - [self sendEventWithName:@"onTranscriptionError" - body:@{@"error" : @{@"code" : @"recognition_url_init"}}]; - [self teardown]; - return; - } - - @try { - - [self sendEventWithName:@"onTranscriptionStart" body:nil]; - - // Set up recognition task - // A recognition task represents a speech recognition session. - // We keep a reference to the task so that it can be cancelled. - NSString *taskSessionId = self.sessionId; - self.recognitionTask = [self.speechRecognizer - recognitionTaskWithRequest:self.recognitionUrlRequest - resultHandler:^( - SFSpeechRecognitionResult *_Nullable result, - NSError *_Nullable error) { - if (![taskSessionId isEqualToString:self.sessionId]) { - // session ID has changed, so ignore any capture - // results and error - [self teardown]; - return; - } - if (error != nil) { - NSString *errorMessage = [NSString - stringWithFormat:@"%ld/%@", error.code, - [error localizedDescription]]; - - [self sendEventWithName:@"onTranscriptionError" - body:@{ - @"error" : @{ - @"code" : @"recognition_fail_o", - @"message" : errorMessage, - @"filePath" : filePath - } - }]; - [self teardown]; - return; - } - // No result. 
- if (result == nil) { - [self sendEventWithName:@"onTranscriptionEnd" - body:nil]; - [self teardown]; - return; - } - - BOOL isFinal = result.isFinal; - - if (isFinal) { - NSMutableArray *transcriptionSegs = - [NSMutableArray new]; - for (SFTranscriptionSegment *segment in result - .bestTranscription.segments) { - [transcriptionSegs addObject:@{ - @"transcription" : segment.substring, - @"timestamp" : @(segment.timestamp), - @"duration" : @(segment.duration) - }]; - } - - [self sendEventWithName:@"onTranscriptionResults" - body:@{ - @"segments" : transcriptionSegs, - @"transcription" : - result.bestTranscription - .formattedString, - @"isFinal" : @(isFinal) - }]; - } - - if (isFinal || self.recognitionTask.isCancelled || - self.recognitionTask.isFinishing) { - [self sendEventWithName:@"onTranscriptionEnd" - body:nil]; - [self teardown]; - return; - } - }]; - } @catch (NSException *exception) { - [self sendEventWithName:@"onTranscriptionError" - body:@{ - @"error" : @{ - @"code" : @"start_transcription_fail", - @"message" : [exception reason] - } - }]; - [self teardown]; - - return; - } @finally { - } +-(void) resetAudioSession { + if (self.audioSession == nil) { + self.audioSession = [AVAudioSession sharedInstance]; + } + // Set audio session to inactive and notify other sessions + [self.audioSession setActive:NO withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation error: nil]; + NSString* audioCategory = [self.audioSession category]; + // Category hasn't changed -- do nothing + if ([self.priorAudioCategory isEqualToString:audioCategory]) return; + // Reset back to the previous category + if ([self isHeadsetPluggedIn] || [self isHeadSetBluetooth]) { + [self.audioSession setCategory:self.priorAudioCategory withOptions:AVAudioSessionCategoryOptionAllowBluetooth error: nil]; + } else { + [self.audioSession setCategory:self.priorAudioCategory withOptions:AVAudioSessionCategoryOptionDefaultToSpeaker error: nil]; + } + // Remove pointer reference + self.audioSession = nil; } -- (void)setupAndStartRecognizing:(NSString *)localeStr { - self.audioSession = [AVAudioSession sharedInstance]; - self.priorAudioCategory = [self.audioSession category]; - // Tear down resources before starting speech recognition.. - [self teardown]; - - self.sessionId = [[NSUUID UUID] UUIDString]; - - NSLocale *locale = nil; - if ([localeStr length] > 0) { - locale = [NSLocale localeWithLocaleIdentifier:localeStr]; - } - - if (locale) { - self.speechRecognizer = [[SFSpeechRecognizer alloc] initWithLocale:locale]; - } else { - self.speechRecognizer = [[SFSpeechRecognizer alloc] init]; - } - - self.speechRecognizer.delegate = self; - - // Start audio session... - if (![self setupAudioSession]) { - [self teardown]; - return; - } - - self.recognitionRequest = - [[SFSpeechAudioBufferRecognitionRequest alloc] init]; - // Configure request so that results are returned before audio - // recording is finished - self.recognitionRequest.shouldReportPartialResults = YES; - - if (self.recognitionRequest == nil) { - [self sendResult:@{@"code" : @"recognition_init"}:nil:nil:nil]; +- (void) setupAndStartRecognizing:(NSString*)localeStr { + self.audioSession = [AVAudioSession sharedInstance]; + self.priorAudioCategory = [self.audioSession category]; + // Tear down resources before starting speech recognition.. 
[self teardown]; - return; - } - - if (self.audioEngine == nil) { - self.audioEngine = [[AVAudioEngine alloc] init]; - } - - @try { - AVAudioInputNode *inputNode = self.audioEngine.inputNode; + + self.sessionId = [[NSUUID UUID] UUIDString]; + + NSLocale* locale = nil; + if ([localeStr length] > 0) { + locale = [NSLocale localeWithLocaleIdentifier:localeStr]; + } + + if (locale) { + self.speechRecognizer = [[SFSpeechRecognizer alloc] initWithLocale:locale]; + } else { + self.speechRecognizer = [[SFSpeechRecognizer alloc] init]; + } + + self.speechRecognizer.delegate = self; + + // Start audio session... + if (![self setupAudioSession]) { + [self teardown]; + return; + } + + self.recognitionRequest = [[SFSpeechAudioBufferRecognitionRequest alloc] init]; + // Configure request so that results are returned before audio recording is finished + self.recognitionRequest.shouldReportPartialResults = YES; + + if (self.recognitionRequest == nil) { + [self sendResult:@{@"code": @"recognition_init"} :nil :nil :nil]; + [self teardown]; + return; + } + + if (self.audioEngine == nil) { + self.audioEngine = [[AVAudioEngine alloc] init]; + } + + @try { + AVAudioInputNode* inputNode = self.audioEngine.inputNode; if (inputNode == nil) { - [self sendResult:@{@"code" : @"input"}:nil:nil:nil]; - [self teardown]; - return; + [self sendResult:@{@"code": @"input"} :nil :nil :nil]; + [self teardown]; + return; } - + [self sendEventWithName:@"onSpeechStart" body:nil]; - + + // A recognition task represents a speech recognition session. // We keep a reference to the task so that it can be cancelled. NSString *taskSessionId = self.sessionId; - self.recognitionTask = [self.speechRecognizer - recognitionTaskWithRequest:self.recognitionRequest - resultHandler:^( - SFSpeechRecognitionResult *_Nullable result, - NSError *_Nullable error) { - if (![taskSessionId isEqualToString:self.sessionId]) { - // session ID has changed, so ignore any - // capture results and error - [self teardown]; - return; - } - if (error != nil) { - NSString *errorMessage = [NSString - stringWithFormat:@"%ld/%@", error.code, - [error localizedDescription]]; - [self sendResult:@{ - @"code" : @"recognition_fail_ooo", - @"message" : errorMessage - }:nil:nil:nil]; - [self teardown]; - return; - } - - // No result. 
- if (result == nil) { - [self sendEventWithName:@"onSpeechEnd" body:nil]; - [self teardown]; - return; - } - - BOOL isFinal = result.isFinal; - - NSMutableArray *transcriptionDics = [NSMutableArray new]; - for (SFTranscription *transcription in result - .transcriptions) { - [transcriptionDics - addObject:transcription.formattedString]; - } - - [self sendResult :nil :result.bestTranscription.formattedString :transcriptionDics :[NSNumber numberWithBool:isFinal]]; - - if (isFinal || self.recognitionTask.isCancelled || - self.recognitionTask.isFinishing) { - [self sendEventWithName:@"onSpeechEnd" body:nil]; - if (!self.continuous) { - [self teardown]; - } - return; - } - }]; - - AVAudioFormat *recordingFormat = [inputNode outputFormatForBus:0]; + // Start the silence timer + [self startSilenceTimer]; + + self.recognitionTask = [self.speechRecognizer recognitionTaskWithRequest:self.recognitionRequest resultHandler:^(SFSpeechRecognitionResult * _Nullable result, NSError * _Nullable error) { + if (![taskSessionId isEqualToString:self.sessionId]) { + // session ID has changed, so ignore any capture results and error + [self teardown]; + return; + } + if (error != nil) { + NSString *errorMessage = [NSString stringWithFormat:@"%ld/%@", error.code, [error localizedDescription]]; + [self sendResult:@{@"code": @"recognition_fail", @"message": errorMessage} :nil :nil :nil]; + [self teardown]; + return; + } + + // No result. + if (result == nil) { + [self sendEventWithName:@"onSpeechEnd" body:nil]; + [self teardown]; + return; + } + + BOOL isFinal = result.isFinal; + + NSMutableArray* transcriptionDics = [NSMutableArray new]; + for (SFTranscription* transcription in result.transcriptions) { + [transcriptionDics addObject:transcription.formattedString]; + } + + [self sendResult :nil :result.bestTranscription.formattedString :transcriptionDics :[NSNumber numberWithBool:isFinal]]; + + if (isFinal || self.recognitionTask.isCancelled || self.recognitionTask.isFinishing) { + [self sendEventWithName:@"onSpeechEnd" body:nil]; + if (!self.continuous) { + [self teardown]; + } + return; + } + + }]; + + AVAudioFormat* recordingFormat = [inputNode outputFormatForBus:0]; AVAudioMixerNode *mixer = [[AVAudioMixerNode alloc] init]; [self.audioEngine attachNode:mixer]; - + // Start recording and append recording buffer to speech recognizer @try { - [mixer - installTapOnBus:0 - bufferSize:1024 - format:recordingFormat - block:^(AVAudioPCMBuffer *_Nonnull buffer, - AVAudioTime *_Nonnull when) { - // Volume Level Metering - UInt32 inNumberFrames = buffer.frameLength; - float LEVEL_LOWPASS_TRIG = 0.5; - if (buffer.format.channelCount > 0) { - Float32 *samples = - (Float32 *)buffer.floatChannelData[0]; - Float32 avgValue = 0; - - vDSP_maxmgv((Float32 *)samples, 1, &avgValue, - inNumberFrames); - self.averagePowerForChannel0 = - (LEVEL_LOWPASS_TRIG * - ((avgValue == 0) ? -100 - : 20.0 * log10f(avgValue))) + - ((1 - LEVEL_LOWPASS_TRIG) * - self.averagePowerForChannel0); - self.averagePowerForChannel1 = - self.averagePowerForChannel0; - } - - if (buffer.format.channelCount > 1) { - Float32 *samples = - (Float32 *)buffer.floatChannelData[1]; - Float32 avgValue = 0; - - vDSP_maxmgv((Float32 *)samples, 1, &avgValue, - inNumberFrames); - self.averagePowerForChannel1 = - (LEVEL_LOWPASS_TRIG * - ((avgValue == 0) ? 
-100 - : 20.0 * log10f(avgValue))) + - ((1 - LEVEL_LOWPASS_TRIG) * - self.averagePowerForChannel1); - } - // Normalizing the Volume Value on scale of (0-10) - self.averagePowerForChannel1 = - [self _normalizedPowerLevelFromDecibels: - self.averagePowerForChannel1] * - 10; - NSNumber *value = [NSNumber - numberWithFloat:self.averagePowerForChannel1]; - [self sendEventWithName:@"onSpeechVolumeChanged" - body:@{@"value" : value}]; - - // Todo: write recording buffer to file (if user - // opts in) - if (self.recognitionRequest != nil) { - [self.recognitionRequest appendAudioPCMBuffer:buffer]; - } - }]; + [mixer installTapOnBus:0 bufferSize:1024 format:recordingFormat block:^(AVAudioPCMBuffer * _Nonnull buffer, AVAudioTime * _Nonnull when) { + //Volume Level Metering + UInt32 inNumberFrames = buffer.frameLength; + float LEVEL_LOWPASS_TRIG = 0.5; + if(buffer.format.channelCount>0) + { + Float32* samples = (Float32*)buffer.floatChannelData[0]; + Float32 avgValue = 0; + + vDSP_maxmgv((Float32*)samples, 1, &avgValue, inNumberFrames); + self.averagePowerForChannel0 = (LEVEL_LOWPASS_TRIG*((avgValue==0)?-100:20.0*log10f(avgValue))) + ((1-LEVEL_LOWPASS_TRIG)*self.averagePowerForChannel0) ; + self.averagePowerForChannel1 = self.averagePowerForChannel0; + } + + if(buffer.format.channelCount>1) + { + Float32* samples = (Float32*)buffer.floatChannelData[1]; + Float32 avgValue = 0; + + vDSP_maxmgv((Float32*)samples, 1, &avgValue, inNumberFrames); + self.averagePowerForChannel1 = (LEVEL_LOWPASS_TRIG*((avgValue==0)?-100:20.0*log10f(avgValue))) + ((1-LEVEL_LOWPASS_TRIG)*self.averagePowerForChannel1) ; + + } + // Normalizing the Volume Value on scale of (0-10) + self.averagePowerForChannel1 = [self _normalizedPowerLevelFromDecibels:self.averagePowerForChannel1]*10; + NSNumber *value = [NSNumber numberWithFloat:self.averagePowerForChannel1]; + [self sendEventWithName:@"onSpeechVolumeChanged" body:@{@"value": value}]; + + // Todo: write recording buffer to file (if user opts in) + if (self.recognitionRequest != nil) { + [self.recognitionRequest appendAudioPCMBuffer:buffer]; + } + }]; } @catch (NSException *exception) { - NSLog(@"[Error] - %@ %@", exception.name, exception.reason); - [self sendResult:@{ - @"code" : @"start_recording", - @"message" : [exception reason] - }:nil:nil:nil]; - [self teardown]; - return; - } @finally { - } - + NSLog(@"[Error] - %@ %@", exception.name, exception.reason); + [self sendResult:@{@"code": @"start_recording", @"message": [exception reason]} :nil :nil :nil]; + [self teardown]; + return; + } @finally {} + [self.audioEngine connect:inputNode to:mixer format:recordingFormat]; [self.audioEngine prepare]; - NSError *audioSessionError = nil; + NSError* audioSessionError = nil; [self.audioEngine startAndReturnError:&audioSessionError]; if (audioSessionError != nil) { - [self sendResult:@{ - @"code" : @"audio", - @"message" : [audioSessionError localizedDescription] - }:nil:nil:nil]; - [self teardown]; - return; + [self sendResult:@{@"code": @"audio", @"message": [audioSessionError localizedDescription]} :nil :nil :nil]; + [self teardown]; + return; } - } @catch (NSException *exception) { - [self sendResult:@{ - @"code" : @"start_recording", - @"message" : [exception reason] - }:nil:nil:nil]; + } + @catch (NSException *exception) { + NSLog(@"[Error] - %@ %@", exception.name, exception.reason); + [self sendResult:@{@"code": @"start_recording", @"message": [exception reason]} :nil :nil :nil]; return; } } +//Timer methods start +/**Start silence timer */ +-(void)startSilenceTimer { // 
ADDED + [self stopSilenceTimer]; + self.silenceTimer = [NSTimer scheduledTimerWithTimeInterval:self.silenceTimeout target:self selector:@selector(handleSilenceTimeout) userInfo:nil repeats:NO]; +} -- (CGFloat)_normalizedPowerLevelFromDecibels:(CGFloat)decibels { - if (decibels < -80.0f || decibels == 0.0f) { - return 0.0f; - } - CGFloat power = - powf((powf(10.0f, 0.05f * decibels) - powf(10.0f, 0.05f * -80.0f)) * - (1.0f / (1.0f - powf(10.0f, 0.05f * -80.0f))), - 1.0f / 2.0f); - if (power < 1.0f) { - return power; - } else { - return 1.0f; - } +/**Stop silence timer */ +-(void)stopSilenceTimer { // ADDED + if (self.silenceTimer) { + [self.silenceTimer invalidate]; + self.silenceTimer = nil; + } } -- (NSArray *)supportedEvents { - return @[ - @"onSpeechResults", @"onSpeechStart", @"onSpeechPartialResults", - @"onSpeechError", @"onSpeechEnd", @"onSpeechRecognized", - @"onSpeechVolumeChanged", @"onTranscriptionStart", @"onTranscriptionEnd", - @"onTranscriptionError", @"onTranscriptionResults" - ]; +/**Handle silence timeout */ +-(void)handleSilenceTimeout { // ADDED + [self sendResult:@{@"code": @"silence_timeout", @"message": @"No speech detected for a period of time"} :nil :nil :nil]; + [self teardown]; +} //Timer methods end + +- (CGFloat)_normalizedPowerLevelFromDecibels:(CGFloat)decibels { + if (decibels < -80.0f || decibels == 0.0f) { + return 0.0f; + } + CGFloat power = powf((powf(10.0f, 0.05f * decibels) - powf(10.0f, 0.05f * -80.0f)) * (1.0f / (1.0f - powf(10.0f, 0.05f * -80.0f))), 1.0f / 2.0f); + if (power < 1.0f) { + return power; + }else{ + return 1.0f; + } } -- (void)sendResult:(NSDictionary *) - error:(NSString *)bestTranscription - :(NSArray *)transcriptions - :(NSNumber *)isFinal { - if (error != nil) { - [self sendEventWithName:@"onSpeechError" body:@{@"error" : error}]; - } - if (bestTranscription != nil) { - [self sendEventWithName:@"onSpeechResults" - body:@{@"value" : @[ bestTranscription ]}]; - } - if (transcriptions != nil) { - [self sendEventWithName:@"onSpeechPartialResults" - body:@{@"value" : transcriptions}]; - } - if (isFinal != nil) { - [self sendEventWithName:@"onSpeechRecognized" body:@{@"isFinal" : isFinal}]; - } +- (NSArray *)supportedEvents +{ + return @[ + @"onSpeechResults", + @"onSpeechStart", + @"onSpeechPartialResults", + @"onSpeechError", + @"onSpeechEnd", + @"onSpeechRecognized", + @"onSpeechVolumeChanged" + ]; } -// Called when the availability of the given recognizer changes -- (void)speechRecognizer:(SFSpeechRecognizer *)speechRecognizer - availabilityDidChange:(BOOL)available { - if (available == false) { - [self sendResult:RCTMakeError(@"Speech recognition is not available now", - nil, nil):nil:nil:nil]; - } +- (void) sendResult:(NSDictionary*)error :(NSString*)bestTranscription :(NSArray*)transcriptions :(NSNumber*)isFinal { + if (error != nil) { + [self sendEventWithName:@"onSpeechError" body:@{@"error": error}]; + } + if (bestTranscription != nil) { + [self sendEventWithName:@"onSpeechResults" body:@{@"value":@[bestTranscription]} ]; + } + if (transcriptions != nil) { + [self sendEventWithName:@"onSpeechPartialResults" body:@{@"value":transcriptions}]; + } + if (isFinal != nil) { + [self sendEventWithName:@"onSpeechRecognized" body: @{@"isFinal": isFinal}]; + } } -RCT_EXPORT_METHOD(stopSpeech : (RCTResponseSenderBlock)callback) { - [self.recognitionTask finish]; - callback(@[ @false ]); +// Upgraded for checking Permissions in all steps +- (void)checkMicrophonePermissionWithCompletion:(void (^)(BOOL granted))completion { + [[AVAudioSession 
sharedInstance] requestRecordPermission:^(BOOL granted) { + dispatch_async(dispatch_get_main_queue(), ^{ + completion(granted); + }); + }]; } -RCT_EXPORT_METHOD(stopTranscription : (RCTResponseSenderBlock)callback) { - [self.recognitionTask finish]; - callback(@[ @false ]); +// Added to show alert when user disabled Permissions +- (void)showAlertWithTitle:(NSString *)title message:(NSString *)message { + dispatch_async(dispatch_get_main_queue(), ^{ + UIAlertController *alertController = [UIAlertController alertControllerWithTitle:title + message:message + preferredStyle:UIAlertControllerStyleAlert]; + + UIAlertAction *settingsAction = [UIAlertAction actionWithTitle:@"Settings" + style:UIAlertActionStyleDefault + handler:^(UIAlertAction * _Nonnull action) { + NSURL *settingsURL = [NSURL URLWithString:UIApplicationOpenSettingsURLString]; + if ([[UIApplication sharedApplication] canOpenURL:settingsURL]) { + [[UIApplication sharedApplication] openURL:settingsURL options:@{} completionHandler:nil]; + } + }]; + + UIAlertAction *notNowAction = [UIAlertAction actionWithTitle:@"Not Now" + style:UIAlertActionStyleDefault + handler:nil]; + + [alertController addAction:settingsAction]; + [alertController addAction:notNowAction]; + + UIViewController *rootViewController = RCTPresentedViewController(); // Ensure you have a way to get the root view controller + [rootViewController presentViewController:alertController animated:YES completion:nil]; + }); } -RCT_EXPORT_METHOD(cancelSpeech : (RCTResponseSenderBlock)callback) { - [self.recognitionTask cancel]; - callback(@[ @false ]); +// Called when the availability of the given recognizer changes +- (void)speechRecognizer:(SFSpeechRecognizer *)speechRecognizer availabilityDidChange:(BOOL)available { + if (available == false) { + [self sendResult:RCTMakeError(@"Speech recognition is not available now", nil, nil) :nil :nil :nil]; + } } -RCT_EXPORT_METHOD(cancelTranscription : (RCTResponseSenderBlock)callback) { - [self.recognitionTask cancel]; - callback(@[ @false ]); +RCT_EXPORT_METHOD(stopSpeech:(RCTResponseSenderBlock)callback) +{ + [self.recognitionTask finish]; + callback(@[@false]); } -RCT_EXPORT_METHOD(destroySpeech : (RCTResponseSenderBlock)callback) { - [self teardown]; - callback(@[ @false ]); + +RCT_EXPORT_METHOD(cancelSpeech:(RCTResponseSenderBlock)callback) { + [self.recognitionTask cancel]; + callback(@[@false]); } -RCT_EXPORT_METHOD(destroyTranscription : (RCTResponseSenderBlock)callback) { - [self teardown]; - callback(@[ @false ]); +RCT_EXPORT_METHOD(destroySpeech:(RCTResponseSenderBlock)callback) { + [self teardown]; + callback(@[@false]); } -RCT_EXPORT_METHOD(isSpeechAvailable : (RCTResponseSenderBlock)callback) { - [SFSpeechRecognizer - requestAuthorization:^(SFSpeechRecognizerAuthorizationStatus status) { +RCT_EXPORT_METHOD(isSpeechAvailable:(RCTResponseSenderBlock)callback) { + [SFSpeechRecognizer requestAuthorization:^(SFSpeechRecognizerAuthorizationStatus status) { switch (status) { - case SFSpeechRecognizerAuthorizationStatusAuthorized: - callback(@[ @true ]); - break; - default: - callback(@[ @false ]); + case SFSpeechRecognizerAuthorizationStatusAuthorized: + callback(@[@true]); + break; + default: + callback(@[@false]); } - }]; + }]; } -RCT_EXPORT_METHOD(isRecognizing : (RCTResponseSenderBlock)callback) { - if (self.recognitionTask != nil) { - switch (self.recognitionTask.state) { - case SFSpeechRecognitionTaskStateRunning: - callback(@[ @true ]); - break; - default: - callback(@[ @false ]); 
+RCT_EXPORT_METHOD(isRecognizing:(RCTResponseSenderBlock)callback) { + if (self.recognitionTask != nil){ + switch (self.recognitionTask.state) { + case SFSpeechRecognitionTaskStateRunning: + callback(@[@true]); + break; + default: + callback(@[@false]); + } } - } else { - callback(@[ @false ]); - } -} - -RCT_EXPORT_METHOD(startSpeech - : (NSString *)localeStr callback - : (RCTResponseSenderBlock)callback) { - if (self.recognitionTask != nil) { - [self sendResult:RCTMakeError(@"Speech recognition already started!", nil, - nil):nil:nil:nil]; - return; - } - - [SFSpeechRecognizer requestAuthorization:^( - SFSpeechRecognizerAuthorizationStatus status) { - switch (status) { - case SFSpeechRecognizerAuthorizationStatusNotDetermined: - [self sendResult:RCTMakeError(@"Speech recognition not yet authorized", - nil, nil):nil:nil:nil]; - break; - case SFSpeechRecognizerAuthorizationStatusDenied: - [self sendResult:RCTMakeError(@"User denied access to speech recognition", - nil, nil):nil:nil:nil]; - break; - case SFSpeechRecognizerAuthorizationStatusRestricted: - [self sendResult:RCTMakeError( - @"Speech recognition restricted on this device", nil, - nil):nil:nil:nil]; - break; - case SFSpeechRecognizerAuthorizationStatusAuthorized: - [self setupAndStartRecognizing:localeStr]; - break; + else { + callback(@[@false]); } - }]; - callback(@[ @false ]); } -RCT_EXPORT_METHOD(startTranscription - : (NSString *)filePath withLocaleStr - : (NSString *)localeStr callback - : (RCTResponseSenderBlock)callback) { - if (self.recognitionTask != nil) { - [self sendResult:RCTMakeError(@"Speech recognition already started!", nil, - nil):nil:nil:nil]; - return; - } - - [SFSpeechRecognizer requestAuthorization:^( - SFSpeechRecognizerAuthorizationStatus status) { - switch (status) { - case SFSpeechRecognizerAuthorizationStatusNotDetermined: - [self sendResult:RCTMakeError(@"Speech recognition not yet authorized", - nil, nil):nil:nil:nil]; - break; - case SFSpeechRecognizerAuthorizationStatusDenied: - [self sendResult:RCTMakeError(@"User denied access to speech recognition", - nil, nil):nil:nil:nil]; - break; - case SFSpeechRecognizerAuthorizationStatusRestricted: - [self sendResult:RCTMakeError( - @"Speech recognition restricted on this device", nil, - nil):nil:nil:nil]; - break; - case SFSpeechRecognizerAuthorizationStatusAuthorized: - [self setupAndTranscribeFile:filePath withLocaleStr:localeStr]; - break; +RCT_EXPORT_METHOD(startSpeech:(NSString*)localeStr callback:(RCTResponseSenderBlock)callback) { + if (self.recognitionTask != nil) { + [self sendResult:RCTMakeError(@"Speech recognition already started!", nil, nil) :nil :nil :nil]; + return; } - }]; - callback(@[ @false ]); + + [self checkMicrophonePermissionWithCompletion:^(BOOL microphoneGranted) { + if (!microphoneGranted) { + [self showAlertWithTitle:@"Microphone Access Denied" message:@"Please enable microphone access in settings."]; + callback(@[@false]); + return; + } + + [SFSpeechRecognizer requestAuthorization:^(SFSpeechRecognizerAuthorizationStatus status) { + dispatch_async(dispatch_get_main_queue(), ^{ + switch (status) { + case SFSpeechRecognizerAuthorizationStatusNotDetermined: + [self sendResult:RCTMakeError(@"Speech recognition not yet authorized", nil, nil) :nil :nil :nil]; + [self showAlertWithTitle:@"Speech Recognition Not Authorized" message:@"Please enable speech recognition access in settings."]; + break; + case SFSpeechRecognizerAuthorizationStatusDenied: + [self sendResult:RCTMakeError(@"User denied access to speech recognition", nil, nil) 
:nil :nil :nil];
+                        [self showAlertWithTitle:@"Speech Recognition Access Denied" message:@"Please enable speech recognition access in settings."];
+                        break;
+                    case SFSpeechRecognizerAuthorizationStatusRestricted:
+                        [self sendResult:RCTMakeError(@"Speech recognition restricted on this device", nil, nil) :nil :nil :nil];
+                        [self showAlertWithTitle:@"Speech Recognition Restricted" message:@"Speech recognition is restricted on this device."];
+                        break;
+                    case SFSpeechRecognizerAuthorizationStatusAuthorized:
+                        [self setupAndStartRecognizing:localeStr];
+                        break;
+                }
+                callback(@[@false]);
+            });
+        }];
+    }];
 }
+
 - (dispatch_queue_t)methodQueue {
-  return dispatch_get_main_queue();
+    return dispatch_get_main_queue();
 }

 RCT_EXPORT_MODULE()
-@end
+
+
+@end
\ No newline at end of file

From 5ceb4a8713293814309a0942fe6bdeadda5ebe14 Mon Sep 17 00:00:00 2001
From: "vladyslav.yaremenko"
Date: Fri, 26 Jul 2024 16:52:02 +0300
Subject: [PATCH 2/2] Updated Voice.m for tvOS. Upgraded podspec for tvOS. Updated EventSubscription.

---
 ios/Voice/Voice.m          | 196 +++++++++++++++++++++----------
 react-native-voice.podspec |   2 +-
 src/VoiceModuleTypes.ts    |   4 +-
 3 files changed, 114 insertions(+), 88 deletions(-)

diff --git a/ios/Voice/Voice.m b/ios/Voice/Voice.m
index a4074210..b3a88928 100644
--- a/ios/Voice/Voice.m
+++ b/ios/Voice/Voice.m
@@ -1,3 +1,4 @@
+#if TARGET_OS_IOS
 #import "Voice.h"
 #import
 #import
@@ -5,7 +6,6 @@
 #import
 #import
 #import
-#import

 @interface Voice ()
@@ -24,9 +24,6 @@ @interface Voice ()
 /** Volume level Metering*/
 @property float averagePowerForChannel0;
 @property float averagePowerForChannel1;
-/**Timers for silences*/
-@property (nonatomic, strong) NSTimer *silenceTimer;
-@property (nonatomic) NSTimeInterval silenceTimeout;

 @end
@@ -34,15 +31,6 @@ @implementation Voice
 {
 }
-// Initialize silence timeout
--(instancetype)init {
-    self = [super init];
-    if (self) {
-        _silenceTimeout = 10.0; // 10 seconds timeout
-    }
-    return self;
-}
-
 /** Returns "YES" if no errors had occurred */
 -(BOOL) setupAudioSession {
     if ([self isHeadsetPluggedIn] || [self isHeadSetBluetooth]){
@@ -51,19 +39,19 @@ -(BOOL) setupAudioSession {
     else {
         [self.audioSession setCategory:AVAudioSessionCategoryPlayAndRecord withOptions:AVAudioSessionCategoryOptionDefaultToSpeaker error: nil];
     }
-    
+
     NSError* audioSessionError = nil;
-    
+
     // Activate the audio session
     [self.audioSession setActive:YES withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation error:&audioSessionError];
-    
+
     if (audioSessionError != nil) {
         [self sendResult:@{@"code": @"audio", @"message": [audioSessionError localizedDescription]} :nil :nil :nil];
         return NO;
     }

     [[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(teardown) name:RCTBridgeWillReloadNotification object:nil];
-    
+
     return YES;
 }
@@ -92,28 +80,25 @@ - (void) teardown {
     self.isTearingDown = YES;
     [self.recognitionTask cancel];
     self.recognitionTask = nil;
-    
-    // Stop the silence timer
-    [self stopSilenceTimer];
-    
+
     // Set back audio session category
     [self resetAudioSession];
-    
+
     // End recognition request
     [self.recognitionRequest endAudio];
-    
+
     // Remove tap on bus
     [self.audioEngine.inputNode removeTapOnBus:0];
     [self.audioEngine.inputNode reset];
-    
+
     // Stop audio engine and dereference it for re-allocation
     if (self.audioEngine.isRunning) {
        [self.audioEngine stop];
        [self.audioEngine reset];
        self.audioEngine = nil;
    }
-    
-    
+
+
     self.recognitionRequest = nil;
     self.sessionId = nil;
     self.isTearingDown = NO;
@@ -124,7 +109,7 @@ -(void) resetAudioSession {
     self.audioSession
= [AVAudioSession sharedInstance]; } // Set audio session to inactive and notify other sessions - [self.audioSession setActive:NO withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation error: nil]; + // [self.audioSession setActive:NO withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation error: nil]; NSString* audioCategory = [self.audioSession category]; // Category hasn't changed -- do nothing if ([self.priorAudioCategory isEqualToString:audioCategory]) return; @@ -143,42 +128,42 @@ - (void) setupAndStartRecognizing:(NSString*)localeStr { self.priorAudioCategory = [self.audioSession category]; // Tear down resources before starting speech recognition.. [self teardown]; - + self.sessionId = [[NSUUID UUID] UUIDString]; - + NSLocale* locale = nil; if ([localeStr length] > 0) { locale = [NSLocale localeWithLocaleIdentifier:localeStr]; } - + if (locale) { self.speechRecognizer = [[SFSpeechRecognizer alloc] initWithLocale:locale]; } else { self.speechRecognizer = [[SFSpeechRecognizer alloc] init]; } - + self.speechRecognizer.delegate = self; - + // Start audio session... if (![self setupAudioSession]) { [self teardown]; return; } - + self.recognitionRequest = [[SFSpeechAudioBufferRecognitionRequest alloc] init]; // Configure request so that results are returned before audio recording is finished self.recognitionRequest.shouldReportPartialResults = YES; - + if (self.recognitionRequest == nil) { [self sendResult:@{@"code": @"recognition_init"} :nil :nil :nil]; [self teardown]; return; } - + if (self.audioEngine == nil) { self.audioEngine = [[AVAudioEngine alloc] init]; } - + @try { AVAudioInputNode* inputNode = self.audioEngine.inputNode; if (inputNode == nil) { @@ -186,45 +171,42 @@ - (void) setupAndStartRecognizing:(NSString*)localeStr { [self teardown]; return; } - + [self sendEventWithName:@"onSpeechStart" body:nil]; - - + // A recognition task represents a speech recognition session. // We keep a reference to the task so that it can be cancelled. NSString *taskSessionId = self.sessionId; - // Start the silence timer - [self startSilenceTimer]; - + self.recognitionTask = [self.speechRecognizer recognitionTaskWithRequest:self.recognitionRequest resultHandler:^(SFSpeechRecognitionResult * _Nullable result, NSError * _Nullable error) { if (![taskSessionId isEqualToString:self.sessionId]) { // session ID has changed, so ignore any capture results and error [self teardown]; return; } - if (error != nil) { + if (error) { NSString *errorMessage = [NSString stringWithFormat:@"%ld/%@", error.code, [error localizedDescription]]; [self sendResult:@{@"code": @"recognition_fail", @"message": errorMessage} :nil :nil :nil]; [self teardown]; return; } - + // No result. 
if (result == nil) { [self sendEventWithName:@"onSpeechEnd" body:nil]; [self teardown]; return; } - + BOOL isFinal = result.isFinal; - + NSMutableArray* transcriptionDics = [NSMutableArray new]; for (SFTranscription* transcription in result.transcriptions) { [transcriptionDics addObject:transcription.formattedString]; } - + [self sendResult :nil :result.bestTranscription.formattedString :transcriptionDics :[NSNumber numberWithBool:isFinal]]; - + if (isFinal || self.recognitionTask.isCancelled || self.recognitionTask.isFinishing) { [self sendEventWithName:@"onSpeechEnd" body:nil]; if (!self.continuous) { @@ -232,13 +214,12 @@ - (void) setupAndStartRecognizing:(NSString*)localeStr { } return; } - }]; - + AVAudioFormat* recordingFormat = [inputNode outputFormatForBus:0]; AVAudioMixerNode *mixer = [[AVAudioMixerNode alloc] init]; [self.audioEngine attachNode:mixer]; - + // Start recording and append recording buffer to speech recognizer @try { [mixer installTapOnBus:0 bufferSize:1024 format:recordingFormat block:^(AVAudioPCMBuffer * _Nonnull buffer, AVAudioTime * _Nonnull when) { @@ -268,19 +249,18 @@ - (void) setupAndStartRecognizing:(NSString*)localeStr { self.averagePowerForChannel1 = [self _normalizedPowerLevelFromDecibels:self.averagePowerForChannel1]*10; NSNumber *value = [NSNumber numberWithFloat:self.averagePowerForChannel1]; [self sendEventWithName:@"onSpeechVolumeChanged" body:@{@"value": value}]; - + // Todo: write recording buffer to file (if user opts in) if (self.recognitionRequest != nil) { [self.recognitionRequest appendAudioPCMBuffer:buffer]; } }]; } @catch (NSException *exception) { - NSLog(@"[Error] - %@ %@", exception.name, exception.reason); [self sendResult:@{@"code": @"start_recording", @"message": [exception reason]} :nil :nil :nil]; [self teardown]; return; } @finally {} - + [self.audioEngine connect:inputNode to:mixer format:recordingFormat]; [self.audioEngine prepare]; NSError* audioSessionError = nil; @@ -292,31 +272,10 @@ - (void) setupAndStartRecognizing:(NSString*)localeStr { } } @catch (NSException *exception) { - NSLog(@"[Error] - %@ %@", exception.name, exception.reason); [self sendResult:@{@"code": @"start_recording", @"message": [exception reason]} :nil :nil :nil]; return; } } -//Timer methods start -/**Start silence timer */ --(void)startSilenceTimer { // ADDED - [self stopSilenceTimer]; - self.silenceTimer = [NSTimer scheduledTimerWithTimeInterval:self.silenceTimeout target:self selector:@selector(handleSilenceTimeout) userInfo:nil repeats:NO]; -} - -/**Stop silence timer */ --(void)stopSilenceTimer { // ADDED - if (self.silenceTimer) { - [self.silenceTimer invalidate]; - self.silenceTimer = nil; - } -} - -/**Handle silence timeout */ --(void)handleSilenceTimeout { // ADDED - [self sendResult:@{@"code": @"silence_timeout", @"message": @"No speech detected for a period of time"} :nil :nil :nil]; - [self teardown]; -} //Timer methods end - (CGFloat)_normalizedPowerLevelFromDecibels:(CGFloat)decibels { if (decibels < -80.0f || decibels == 0.0f) { @@ -358,7 +317,7 @@ - (void) sendResult:(NSDictionary*)error :(NSString*)bestTranscription :(NSArray } } -// Upgraded for checking Permissions in all steps +// Upgraded for checking Permissions in all steps - (void)checkMicrophonePermissionWithCompletion:(void (^)(BOOL granted))completion { [[AVAudioSession sharedInstance] requestRecordPermission:^(BOOL granted) { dispatch_async(dispatch_get_main_queue(), ^{ @@ -373,7 +332,7 @@ - (void)showAlertWithTitle:(NSString *)title message:(NSString *)message { 
UIAlertController *alertController = [UIAlertController alertControllerWithTitle:title message:message preferredStyle:UIAlertControllerStyleAlert]; - + UIAlertAction *settingsAction = [UIAlertAction actionWithTitle:@"Settings" style:UIAlertActionStyleDefault handler:^(UIAlertAction * _Nonnull action) { @@ -382,14 +341,14 @@ - (void)showAlertWithTitle:(NSString *)title message:(NSString *)message { [[UIApplication sharedApplication] openURL:settingsURL options:@{} completionHandler:nil]; } }]; - + UIAlertAction *notNowAction = [UIAlertAction actionWithTitle:@"Not Now" style:UIAlertActionStyleDefault handler:nil]; - + [alertController addAction:settingsAction]; [alertController addAction:notNowAction]; - + UIViewController *rootViewController = RCTPresentedViewController(); // Ensure you have a way to get the root view controller [rootViewController presentViewController:alertController animated:YES completion:nil]; }); @@ -402,8 +361,7 @@ - (void)speechRecognizer:(SFSpeechRecognizer *)speechRecognizer availabilityDidC } } -RCT_EXPORT_METHOD(stopSpeech:(RCTResponseSenderBlock)callback) -{ +RCT_EXPORT_METHOD(stopSpeech:(RCTResponseSenderBlock)callback) { [self.recognitionTask finish]; callback(@[@false]); } @@ -450,14 +408,14 @@ - (void)speechRecognizer:(SFSpeechRecognizer *)speechRecognizer availabilityDidC [self sendResult:RCTMakeError(@"Speech recognition already started!", nil, nil) :nil :nil :nil]; return; } - + [self checkMicrophonePermissionWithCompletion:^(BOOL microphoneGranted) { if (!microphoneGranted) { [self showAlertWithTitle:@"Microphone Access Denied" message:@"Please enable microphone access in settings."]; callback(@[@false]); return; } - + [SFSpeechRecognizer requestAuthorization:^(SFSpeechRecognizerAuthorizationStatus status) { dispatch_async(dispatch_get_main_queue(), ^{ switch (status) { @@ -492,4 +450,72 @@ - (dispatch_queue_t)methodQueue { -@end \ No newline at end of file +@end +#else + +#import + +@interface Voice : RCTEventEmitter +@end + +@implementation Voice + +- (void) sendResult:(NSDictionary*)error :(NSString*)bestTranscription :(NSArray*)transcriptions :(NSNumber*)isFinal { + if (error) { + [self sendEventWithName:@"onSpeechError" body:@{@"error": error}]; + } + if (bestTranscription) { + [self sendEventWithName:@"onSpeechResults" body:@{@"value":@[bestTranscription]} ]; + } + if (transcriptions) { + [self sendEventWithName:@"onSpeechPartialResults" body:@{@"value":transcriptions}]; + } + if (isFinal) { + [self sendEventWithName:@"onSpeechRecognized" body: @{@"isFinal": isFinal}]; + } +} + +- (void)setupAndStartRecognizing:(NSString*)localeStr { + [self sendResult:@{@"code": @"speech_unavailable", @"message": @"Speech recognition is not available on tvOS"} :nil :nil :nil]; +} + +- (void)checkMicrophonePermissionWithCompletion:(void (^)(BOOL granted))completion { + // tvOS does not require microphone permissions for speech recognition + completion(NO); +} + + +RCT_EXPORT_METHOD(stopSpeech:(RCTResponseSenderBlock)callback) { + callback(@[@false]); +} + +RCT_EXPORT_METHOD(cancelSpeech:(RCTResponseSenderBlock)callback) { + callback(@[@false]); +} + +RCT_EXPORT_METHOD(destroySpeech:(RCTResponseSenderBlock)callback) { + callback(@[@false]); +} + +RCT_EXPORT_METHOD(isSpeechAvailable:(RCTResponseSenderBlock)callback) { + callback(@[@false]); +} + +RCT_EXPORT_METHOD(isRecognizing:(RCTResponseSenderBlock)callback) { + callback(@[@false]); +} + +RCT_EXPORT_METHOD(startSpeech:(NSString*)localeStr callback:(RCTResponseSenderBlock)callback) { + [self sendResult:@{@"code": 
@"speech_unavailable", @"message": @"Speech recognition is not available on tvOS"} :nil :nil :nil]; + callback(@[@false]); +} + +- (dispatch_queue_t)methodQueue { + return dispatch_get_main_queue(); +} + +RCT_EXPORT_MODULE() + +@end + +#endif diff --git a/react-native-voice.podspec b/react-native-voice.podspec index dc396514..c44f6dc4 100644 --- a/react-native-voice.podspec +++ b/react-native-voice.podspec @@ -10,7 +10,7 @@ Pod::Spec.new do |s| s.authors = package['author'] s.homepage = package['homepage'] - s.platform = :ios, "9.0" + s.platforms = { :ios => "9.0", :tvos => "15.0" } s.source = { :git => "https://github.com/react-native-voice/voice.git" } s.source_files = "ios/**/*.{h,m}" diff --git a/src/VoiceModuleTypes.ts b/src/VoiceModuleTypes.ts index 8710b42f..c55691c4 100644 --- a/src/VoiceModuleTypes.ts +++ b/src/VoiceModuleTypes.ts @@ -1,4 +1,4 @@ -import { EventSubscriptionVendor } from 'react-native'; +import { EventSubscription } from 'react-native'; type Callback = (error: string) => void; @@ -20,7 +20,7 @@ export type VoiceModule = { isSpeechAvailable: Function; } & SpeechEvents & TranscriptionEvents & - EventSubscriptionVendor; + EventSubscription; export type SpeechEvents = { onSpeechStart?: (e: SpeechStartEvent) => void;