ios - 实施 SFSpeechAudioBufferRecognitionRequest Domain=kAFAssistantErrorDomain Code=216 时出错
问题描述
使用 Objective-C 语言实现 SFSpeechAudioBufferRecognitionRequest 时出现错误。这是我的代码..它在一天前就开始工作了。错误是 Domain=kAFAssistantErrorDomain Code=216 "(null)"
// Starts a fresh speech-recognition session: creates an AVAudioEngine, taps the
// microphone input, and streams PCM buffers into an
// SFSpeechAudioBufferRecognitionRequest handled by `speechRecognizer`.
// NOTE(review): this is the questioner's code and intentionally contains the
// bug discussed in the answer below — see the `isFinal` note near the end.
- (void)startListening {
// Initialize the AVAudioEngine
audioEngine = [[AVAudioEngine alloc] init];
// Make sure there's not a recognition task already running
if (recognitionTask) {
[recognitionTask cancel];
recognitionTask = nil;
}
// Starts an AVAudio Session.
// NOTE(review): `error` is never checked after either call below, so a failed
// session activation would go unnoticed.
NSError *error;
AVAudioSession *audioSession = [AVAudioSession sharedInstance];
[audioSession setCategory:AVAudioSessionCategoryPlayAndRecord error:&error];
[audioSession setActive:YES withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation error:&error];
// Starts a recognition process, in the block it logs the input or stops the audio
// process if there's an error.
recognitionRequest = [[SFSpeechAudioBufferRecognitionRequest alloc] init];
inputNode = audioEngine.inputNode;
// Partial results are disabled, so the handler should only fire for a final
// result or an error.
recognitionRequest.shouldReportPartialResults = NO;
recognitionRequest.taskHint = SFSpeechRecognitionTaskHintDictation;
[self startWaveAudio];
// Sets the recording format to whatever the hardware input produces.
AVAudioFormat *recordingFormat = [inputNode outputFormatForBus:0];
// Feed every captured buffer into the recognition request.
[inputNode installTapOnBus:0 bufferSize:4096 format:recordingFormat block:^(AVAudioPCMBuffer * _Nonnull buffer, AVAudioTime * _Nonnull when) {
[recognitionRequest appendAudioPCMBuffer:buffer];
}];
// Starts the audio engine, i.e. it starts listening.
// NOTE(review): the start error is also ignored here.
[audioEngine prepare];
[audioEngine startAndReturnError:&error];
__block BOOL isFinal = NO;
recognitionTask = [speechRecognizer recognitionTaskWithRequest:recognitionRequest resultHandler:^(SFSpeechRecognitionResult * _Nullable result, NSError * _Nullable error) {
[self stopWaveAudio];
if (result) {
// Whatever you say in the microphone after pressing the button should be being logged
// in the console.
NSLog(@"RESULT:%@",result.bestTranscription.formattedString);
for (SFTranscription *tra in result.transcriptions) {
NSLog(@"Multiple Results : %@", tra.formattedString);
}
if(isFinal == NO) {
[self calculateResultOfSpeechWithResultString:result.bestTranscription.formattedString];
}
// BUG (the article's subject): the negation inverts the flag — after the
// FIRST non-final result `isFinal` becomes YES, the branch below runs, and
// stopRecording cancels the task mid-stream, producing
// kAFAssistantErrorDomain Code=216. It should be `isFinal = result.isFinal;`.
isFinal = !result.isFinal;
}
if (error || isFinal) {
NSLog(@"Error Description : %@", error);
[self stopRecording];
}
}];
}
// Toggles speech capture in response to the talk button: stops an active
// session, or begins listening when idle, then flips the mic bookkeeping flags.
- (IBAction)tap2TlkBtnPrsd:(UIButton *)sender {
    if (!audioEngine.isRunning) {
        [self startListening];
    } else {
        [self stopRecording];
    }
    // Record the new toggle state and clear any pending microphone prompt.
    isMicOn = !isMicOn;
    micPrompt = NO;
}
// Tears down the capture pipeline and the recognition session.
// Does nothing when the engine is not running.
// (The author had experimented with wrapping this in a
// dispatch_async(dispatch_get_main_queue(), ...) call; that wrapper stays
// disabled here, matching the original.)
-(void)stopRecording {
    if (!audioEngine.isRunning) {
        return;
    }
    [inputNode removeTapOnBus:0];
    [inputNode reset];
    [audioEngine stop];
    [recognitionRequest endAudio];
    [recognitionTask cancel];
    recognitionTask = nil;
    recognitionRequest = nil;
}
并且正在尝试不同的方式,比如在请求语音后附加音频缓冲区..
如果可能的话,任何人都可以告诉我,我怎样才能实现一个场景,比如用户会拼写这个词,结果只会是那个词?
解决方案
当我取消识别任务时,我也遇到了同样的 Error=216。只有当识别器认为说话者已经说完时,SFSpeechRecognitionResult 的 isFinal 属性才会为真。
因此,当您写 isFinal = !result.isFinal; 时,第一个(非最终,isFinal 为 False)结果到来后,您的 isFinal 标志反而被置为真,于是进入了调用 stopRecording 的分支,
并通过 [recognitionTask cancel]; 在识别尚未完成时取消了任务——这正是产生 Code=216 错误的原因。
因此,如果您只想要第一个转录结果(即第一个单词),可以读取 bestTranscription 的第一个分段(segment)的 substring 属性,
然后调用 [recognitionTask finish]; 让任务正常结束,而不是取消它。
...
if (result) {
// Log only the first transcription segment (i.e. the first word).
// Fixed: the original snippet read `[[result.bestTranscription.segments.firstObject] substring]`,
// which is not valid Objective-C — `[expr]` alone is not a message send.
NSLog(@"RESULT:%@",[result.bestTranscription.segments.firstObject substring]);
// Finish (rather than cancel) so the recognizer can wind down cleanly
// without raising kAFAssistantErrorDomain Code=216.
[recognitionTask finish];
[self stopRecording];
}
if (error) {
NSLog(@"Error Description : %@", error);
[recognitionTask cancel];
[self stopRecording];
}
...
// Stops audio capture and releases the recognition objects. Note that this
// version deliberately does NOT cancel the task — the request is simply ended
// so the recognizer can deliver its final result without error 216.
-(void)stopRecording {
    if (!audioEngine.isRunning) {
        return;
    }
    [inputNode removeTapOnBus:0];
    [inputNode reset];
    [audioEngine stop];
    [recognitionRequest endAudio];
    recognitionTask = nil;
    recognitionRequest = nil;
}
推荐阅读
- microsoft-graph-api - 邮件发送的 MS Graph API 订阅
- python - Python,NLP - 查找包含给定单词列表的顶级文档
- java - 我在黄瓜中的脚本不适用于 appium 服务器?
- android - Flutter:无法发送 Firebase 推送通知
- ios - 使用 SwiftUI 获取 AVPlayerItem 曲目标题
- javascript - 从无法在移动设备上运行的 php 定期更新 DIV 的内容
- twilio - 如何修复 twilio 出站呼叫错误 12100?
- r - 如何在从线性模型创建的两个图中独立标记两个 x 轴?
- javascript - 如何根据用户的操作系统更改网站上的 css 样式?
- ios - SwiftUI 通用拉取刷新视图