Error Domain=AVFoundationErrorDomain Code=-11800 "The operation could not be completed" {Error Domain=NSOSStatusErrorDomain Code=-16976 "(null)"}

Problem description

I am developing a video app in Swift 3 for iOS. Basically, I have to merge video and audio assets into a single video with a fade transition and save the result to the iPhone photo library. For this I use the following method:

private func doMerge(arrayVideos:[AVAsset], arrayAudios:[AVAsset], animation:Bool, completion:@escaping Completion) -> Void {

        var insertTime = kCMTimeZero
        var audioInsertTime = kCMTimeZero
        var arrayLayerInstructions:[AVMutableVideoCompositionLayerInstruction] = []
        var outputSize = CGSize.init(width: 0, height: 0)

        // Determine video output size
        for videoAsset in arrayVideos {
            let videoTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0]

            let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)
            var videoSize = videoTrack.naturalSize
            if assetInfo.isPortrait == true {
                videoSize.width = videoTrack.naturalSize.height
                videoSize.height = videoTrack.naturalSize.width
            }
            outputSize = videoSize
        }

        // Init composition
        let mixComposition = AVMutableComposition.init()

        for index in 0..<arrayVideos.count {
            // Get video track
            guard let videoTrack = arrayVideos[index].tracks(withMediaType: AVMediaTypeVideo).first else { continue }

            // Get audio track
            var audioTrack:AVAssetTrack?
            if index < arrayAudios.count {
                if arrayAudios[index].tracks(withMediaType: AVMediaTypeAudio).count > 0 {
                    audioTrack = arrayAudios[index].tracks(withMediaType: AVMediaTypeAudio).first
                }
            }
            // Init video & audio composition track
            let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))

            let audioCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))

            do {
                let startTime = kCMTimeZero
                let duration = arrayVideos[index].duration

                // Add video track to video composition at specific time
                try videoCompositionTrack.insertTimeRange(CMTimeRangeMake(startTime, duration), of: videoTrack, at: insertTime)

                // Add audio track to audio composition at specific time
                var audioDuration = kCMTimeZero
                if index < arrayAudios.count   {
                    audioDuration = arrayAudios[index].duration
                }

                if let audioTrack = audioTrack {
                    do {
                        try audioCompositionTrack.insertTimeRange(CMTimeRangeMake(startTime, audioDuration), of: audioTrack, at: audioInsertTime)
                    }
                    catch {
                        print(error.localizedDescription)
                    }
                }

                // Add instruction for video track
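                // (videoCompositionInstructionForTrack(track:asset:standardSize:atTime:) is a custom helper defined elsewhere, not an AVFoundation API; it is assumed to build a layer instruction scaled and positioned for the output size)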
                let layerInstruction = videoCompositionInstructionForTrack(track: videoCompositionTrack, asset: arrayVideos[index], standardSize: outputSize, atTime: insertTime)

                // Hide video track before changing to new track
                let endTime = CMTimeAdd(insertTime, duration)

                if animation {
                    let timeScale = arrayVideos[index].duration.timescale
                    let durationAnimation = CMTime.init(seconds: 1, preferredTimescale: timeScale)

                    layerInstruction.setOpacityRamp (fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: CMTimeRange.init(start: endTime, duration: durationAnimation))
                }
                else {
                    layerInstruction.setOpacity(0, at: endTime)
                }

                arrayLayerInstructions.append(layerInstruction)

                // Increase the insert time
                audioInsertTime = CMTimeAdd(audioInsertTime, audioDuration)
                insertTime = CMTimeAdd(insertTime, duration)
            }
            catch {
                print("Load track error")
            }
        }

        // Main video composition instruction
        let mainInstruction = AVMutableVideoCompositionInstruction()
        mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, insertTime)
        mainInstruction.layerInstructions = arrayLayerInstructions

        // Main video composition
        let mainComposition = AVMutableVideoComposition()
        mainComposition.instructions = [mainInstruction]
        mainComposition.frameDuration = CMTimeMake(1, 30)
        mainComposition.renderSize = outputSize

        // Export to file
        let path = NSTemporaryDirectory().appending("mergedVideo.mp4")
        let exportURL = URL.init(fileURLWithPath: path)

        // Remove file if existed
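        // Note: removeItemIfExisted(_:) is a custom FileManager extension (not a Foundation API) that deletes the file at the URL if one exists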
        FileManager.default.removeItemIfExisted(exportURL)

        // Init exporter
        let exporter = AVAssetExportSession.init(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
        exporter?.outputURL = exportURL
        exporter?.outputFileType = AVFileTypeQuickTimeMovie//AVFileType.mp4
        exporter?.shouldOptimizeForNetworkUse = false //true
        exporter?.videoComposition = mainComposition

        // Do export
        exporter?.exportAsynchronously(completionHandler: {
            DispatchQueue.main.async {
                self.exportDidFinish(exporter: exporter, videoURL: exportURL, completion: completion)
            }
        })

    }



fileprivate func exportDidFinish(exporter:AVAssetExportSession?, videoURL:URL, completion:@escaping Completion) -> Void {
        if exporter?.status == AVAssetExportSessionStatus.completed {
            print("Exported file: \(videoURL.absoluteString)")
            completion(videoURL,nil)
        }
        else if exporter?.status == AVAssetExportSessionStatus.failed {
            completion(videoURL,exporter?.error)

            print(exporter?.error as Any)
        }
    }
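
The "save it to the iPhone photo library" step mentioned at the top is not shown in these two methods; it happens after exportDidFinish hands the URL to the completion block. A minimal sketch of that step, assuming the Photos framework is imported and the app already has photo-library permission (saveVideoToLibrary is just an illustrative name, not part of the code above), could look like:

fileprivate func saveVideoToLibrary(videoURL: URL) {
    // Requires: import Photos
    PHPhotoLibrary.shared().performChanges({
        // Create a new video asset in the photo library from the exported file
        _ = PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: videoURL)
    }, completionHandler: { success, error in
        if success {
            print("Saved merged video to the photo library")
        } else {
            print("Saving to the photo library failed: \(String(describing: error))")
        }
    })
}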

Problem: in my exportDidFinish method, the export session's status is AVAssetExportSessionStatus.failed, and it reports the following error message:

Error Domain=AVFoundationErrorDomain Code=-11800 "The operation could not be completed" UserInfo={NSLocalizedFailureReason=An unknown error occurred (-16976), NSLocalizedDescription=The operation could not be completed, NSUnderlyingError=0x1c065fb30 {Error Domain=NSOSStatusErrorDomain Code=-16976 "(null)"}}

Can anyone advise me on this?

Tags: ios, video, swift3, avfoundation

Solution


I ran into exactly the same error, and only on an iPhone 5S simulator running iOS 11. I fixed it by changing the quality setting of the export operation from highest (AVAssetExportPresetHighestQuality) to passthrough (AVAssetExportPresetPassthrough), which keeps the original quality:

    /// try to start an export session and set the path and file type
    if let exportSession = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetPassthrough) { /* AVAssetExportPresetHighestQuality */
        exportSession.outputURL = videoOutputURL
        exportSession.outputFileType = AVFileType.mp4
        exportSession.shouldOptimizeForNetworkUse = true

        exportSession.exportAsynchronously(completionHandler: {
            switch exportSession.status {
            case .failed:
                if let _error = exportSession.error {
                    // !!! used to fail over here with the 11800 / -16976 codes when using AVAssetExportPresetHighestQuality,
                    // but works fine when using AVAssetExportPresetPassthrough
                    failure(_error)
                }
            ....

Hope this helps someone, because that error code and message give you no information; it is just an "unknown error". Besides changing the quality setting, I would also try changing other settings and stripping the export operation down in order to identify the specific component that is failing (a particular image, audio, or video asset). When you get such a generic error message, it is best to use a process of elimination, cutting the code in half each time, so you can track down the problem in logarithmic time.
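
As part of that process of elimination, it can also help to ask AVFoundation up front which presets it considers compatible with the composition. This is only a sketch built on the documented AVAssetExportSession compatibility APIs (it is not from the original code, and it reuses the question's mixComposition and Swift 3 style file-type constants):

// List the presets AVFoundation reports as compatible with this composition
let compatiblePresets = AVAssetExportSession.exportPresets(compatibleWith: mixComposition)
print("Compatible presets: \(compatiblePresets)")

// Check a specific preset / output file type combination before starting the export
AVAssetExportSession.determineCompatibility(ofExportPreset: AVAssetExportPresetHighestQuality,
                                            with: mixComposition,
                                            outputFileType: AVFileTypeMPEG4) { isCompatible in
    if !isCompatible {
        print("AVAssetExportPresetHighestQuality cannot write this composition as MPEG-4")
    }
}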

