首页 > 解决方案 > 如何快速使用 Microsoft Speech API:iOS Speech-to-Text 客户端库?

问题描述

由于我是 Swift 语言的新手,我只知道如何在 Objective-C 中使用 Microsoft API 将语音转换为文本;但根据客户端的要求,我需要用 Swift 实现。希望有人能帮我用 Swift 语言做到这一点。下面附上我在 Objective-C 中使用的示例代码。

// Callback fired when the speech service delivers a final recognition result.
// Ends microphone capture when appropriate, re-enables the start button, and
// prints the n-best phrase list.
-(void)onFinalResponseReceived:(RecognitionResult*)response {
    BOOL isShortPhrase = (self.mode == SpeechRecognitionMode_ShortPhrase);
    BOOL isFinalDictationMessage = self.mode == SpeechRecognitionMode_LongDictation &&
        (response.RecognitionStatus == RecognitionStatus_EndOfDictation ||
         response.RecognitionStatus == RecognitionStatus_DictationEndSilenceTimeout);

    // Once the final result has arrived we can stop the mic client. The data
    // client never needs this: endAudio was already called as soon as all the
    // audio data had been sent.
    if (nil != micClient && self.useMicrophone && (isShortPhrase || isFinalDictationMessage)) {
        [micClient endMicAndRecognition];
    }

    if (isShortPhrase || isFinalDictationMessage) {
        // UI state must be mutated on the main queue.
        dispatch_async(dispatch_get_main_queue(), ^{
            [[self startButton] setEnabled:YES];
        });
    }

    if (!isFinalDictationMessage) {
        dispatch_async(dispatch_get_main_queue(), ^{
            [self WriteLine:(@"********* Final n-BEST Results *********")];
            for (int phraseIndex = 0; phraseIndex < [response.RecognizedPhrase count]; phraseIndex++) {
                RecognizedPhrase* phrase = response.RecognizedPhrase[phraseIndex];
                NSString *line = [NSString stringWithFormat:(@"[%d] Confidence=%@ Text=\"%@\""),
                                  phraseIndex,
                                  ConvertSpeechRecoConfidenceEnumToString(phrase.Confidence),
                                  phrase.DisplayText];
                [self WriteLine:line];
            }

            [self WriteLine:(@"")];
        });
    }
}

// Start microphone capture and recognition. A nonzero OSStatus means the audio
// pipeline failed to start, so report it through the shared WriteLine helper.
//convert speech
OSStatus status = [micClient startMicAndRecognition];
        // NOTE(review): treating any nonzero status as an error — matches the
        // SDK sample's convention; confirm against the SpeechClientStatus enum.
        if (status) {
            [self WriteLine:[[NSString alloc] initWithFormat:(@"Error starting audio. %@"), 
ConvertSpeechErrorToString(status)]];
        }

// Maps a SpeechClientStatus error code to a human-readable name.
// The snippet as posted was truncated: the switch and the function body were
// never closed and non-matching codes fell off the end with no return value
// (undefined behavior). A default case now reports the raw code instead.
NSString* ConvertSpeechErrorToString(int errorCode) {
    switch ((SpeechClientStatus)errorCode) {
        case SpeechClientStatus_SecurityFailed:         return @"SpeechClientStatus_SecurityFailed";
        default:
            // Unknown/unhandled code: surface the numeric value.
            return [NSString stringWithFormat:@"Unknown error: %d", errorCode];
    }
}

标签: ios、swift、speech-recognition

解决方案


尝试这个:

/// Callback fired when the speech service delivers a final recognition result.
/// Swift port of the Objective-C sample: stops the mic client when appropriate,
/// re-enables the start button, and prints the n-best phrase list.
///
/// Fixes over the posted translation:
/// - The original placed `i += 1` OUTSIDE the while loop (the in-loop bare `i`
///   was a no-op), producing an infinite loop; iteration now uses `enumerated()`.
/// - `response.recognizedPhrase` dereferenced the optional parameter without
///   unwrapping, and `.count()` is not valid on a Swift collection property.
func onFinalResponseReceived(_ response: RecognitionResult?) {
    let isFinalDictationMessage = mode == SpeechRecognitionMode_LongDictation &&
        (response?.recognitionStatus == RecognitionStatus_EndOfDictation ||
         response?.recognitionStatus == RecognitionStatus_DictationEndSilenceTimeout)

    // Once the final result has arrived we can stop the mic client. The data
    // client never needs this: endAudio was already called after sending the data.
    if micClient != nil && useMicrophone &&
        ((mode == SpeechRecognitionMode_ShortPhrase) || isFinalDictationMessage) {
        micClient.endMicAndRecognition()
    }

    if (mode == SpeechRecognitionMode_ShortPhrase) || isFinalDictationMessage {
        DispatchQueue.main.async {
            // Swift 3+ imports the Objective-C `enabled` property as `isEnabled`.
            self.startButton().isEnabled = true
        }
    }

    if !isFinalDictationMessage {
        DispatchQueue.main.async {
            self.writeLine("********* Final n-BEST Results *********")
            if let phrases = response?.recognizedPhrase {
                for (index, phrase) in phrases.enumerated() {
                    if let text = phrase.displayText {
                        self.writeLine("[\(index)] Confidence=\(ConvertSpeechRecoConfidenceEnumToString(phrase.confidence)) Text=\"\(text)\"")
                    }
                }
            }
            self.writeLine("")
        }
    }
}

// edit:
// Start microphone capture and recognition; a nonzero status means failure.
var status: OSStatus = micClient.startMicAndRecognition()

/// Maps a SpeechClientStatus error code to a human-readable name.
///
/// Fixes over the posted translation:
/// - `func (int errorCode)` is not valid Swift (no function name, C-style
///   parameter); the function is now named to match the Objective-C original.
/// - `errorCode as? SpeechClientStatus` is an invalid cast on an integer;
///   imported C enums are constructed via their rawValue initializer instead.
///   NOTE(review): rawValue's exact integer type depends on how the SDK
///   declares the enum — confirm against the generated Swift interface.
/// - The switch was non-exhaustive with no default; unknown codes now return
///   a descriptive string rather than failing to compile.
func convertSpeechErrorToString(_ errorCode: Int) -> String? {
    switch SpeechClientStatus(rawValue: UInt32(errorCode)) {
    case SpeechClientStatus_SecurityFailed:
        return "SpeechClientStatus_SecurityFailed"
    default:
        return "Unknown error: \(errorCode)"
    }
}

推荐阅读