ios - 处理格式和数据正确的音频文件时出错
问题描述
我正在尝试把处理后的音频 url 与视频 url 合并。我已经使用 audioEngine.renderOffline
方法对音频进行了处理以更改音高值。但对输出的音频文件调用 audioAsset.tracks(withMediaType: .audio).first
返回 nil(同时 audioAsset.metadata 也为空)。由于这个 nil 值,我无法合并视频和音频。
注意:处理后的音频文件本身是可以播放的——在 AVAudioPlayer 中播放时一切正常,
但通过 AVAsset 却读不到它的音轨。
我也尝试过使用 installTapOnBus
方法录制处理后的音频,但每次都无法得到正确的输出文件。
请帮我解决上述错误。
代码如下:
/// Extracts the audio track of the media at `url` into a temporary M4A file,
/// then hands that file to `playAndRecord` for offline pitch/reverb processing.
/// - Parameter url: Source media URL (audio or video).
func extractAudio(url: URL) {
    // Build a composition containing only the source's audio track.
    let composition = AVMutableComposition()
    let asset = AVURLAsset(url: url)
    guard let audioAssetTrack = asset.tracks(withMediaType: .audio).first,
          let audioCompositionTrack = composition.addMutableTrack(
              withMediaType: .audio,
              preferredTrackID: kCMPersistentTrackID_Invalid) else { return }
    do {
        try audioCompositionTrack.insertTimeRange(audioAssetTrack.timeRange,
                                                  of: audioAssetTrack,
                                                  at: .zero)
    } catch {
        print("Failed to insert audio track: \(error)")
        return
    }

    // Overwrite any previous export at the output location.
    let outputUrl = URL(fileURLWithPath: NSTemporaryDirectory() + "out.m4a")
    if FileManager.default.fileExists(atPath: outputUrl.path) {
        try? FileManager.default.removeItem(at: outputUrl)
    }

    // Create an export session. The preset can legitimately be unavailable,
    // so fail gracefully instead of force-unwrapping.
    guard let exportSession = AVAssetExportSession(asset: composition,
                                                   presetName: AVAssetExportPresetAppleM4A) else {
        print("Unable to create AVAssetExportSession")
        return
    }
    exportSession.outputFileType = .m4a
    exportSession.outputURL = outputUrl
    exportSession.timeRange = CMTimeRangeMake(start: .zero, duration: asset.duration)

    // Export the file. The completion handler runs on a background queue.
    exportSession.exportAsynchronously {
        // Surface export failures instead of silently returning.
        guard exportSession.status == .completed else {
            print("Audio export failed: \(String(describing: exportSession.error))")
            return
        }
        do {
            self.sourceFile = try AVAudioFile(forReading: outputUrl)
            self.format = self.sourceFile.processingFormat
            self.playAndRecord(pitch: -500, rate: 1.0, reverb: 10, echo: 1.0)
        } catch {
            print("Unable to open exported audio file: \(error)")
        }
    }
}
/// Renders `self.sourceFile` offline through a reverb → pitch → varispeed
/// chain and writes the processed audio to Documents/EDittedFile.m4a.
/// - Parameters:
///   - pitch: Pitch shift in cents (e.g. -500).
///   - rate: Playback rate for the varispeed unit (1.0 = unchanged).
///   - reverb: Wet/dry mix percentage for the reverb unit (0...100).
///   - echo: Currently unused — NOTE(review): wire up a delay unit or remove.
func playAndRecord(pitch: Float, rate: Float, reverb: Float, echo: Float) {
    let engine = AVAudioEngine()
    let player = AVAudioPlayerNode()
    let reverbEffect = AVAudioUnitReverb()
    let pitchEffect = AVAudioUnitTimePitch()
    let playbackRateEffect = AVAudioUnitVarispeed()

    engine.attach(player)
    engine.attach(reverbEffect)
    engine.attach(pitchEffect)
    engine.attach(playbackRateEffect)

    // Set the desired effect parameters.
    reverbEffect.loadFactoryPreset(.mediumHall)
    reverbEffect.wetDryMix = reverb
    pitchEffect.pitch = pitch
    playbackRateEffect.rate = rate

    // Connect the nodes: player → reverb → pitch → varispeed → mixer.
    engine.connect(player, to: reverbEffect, format: format)
    engine.connect(reverbEffect, to: pitchEffect, format: format)
    engine.connect(pitchEffect, to: playbackRateEffect, format: format)
    engine.connect(playbackRateEffect, to: engine.mainMixerNode, format: format)

    // Schedule the source file.
    player.scheduleFile(sourceFile, at: nil)

    do {
        // The maximum number of frames the engine renders in any single render call.
        let maxFrames: AVAudioFrameCount = 4096
        try engine.enableManualRenderingMode(.offline, format: format,
                                             maximumFrameCount: maxFrames)
    } catch {
        fatalError("Enabling manual rendering mode failed: \(error).")
    }

    do {
        try engine.start()
        player.play()
    } catch {
        fatalError("Unable to start audio engine: \(error).")
    }

    // The output buffer to which the engine renders the processed data.
    let buffer = AVAudioPCMBuffer(pcmFormat: engine.manualRenderingFormat,
                                  frameCapacity: engine.manualRenderingMaximumFrameCount)!

    // BUG FIX: the original opened the output file with the *source's*
    // compressed (AAC) settings combined with a .pcmFormatInt32/interleaved
    // processing format. That does not match the buffers produced by manual
    // rendering (engine.manualRenderingFormat, deinterleaved float32), so the
    // written file had no readable audio track — which is why
    // AVURLAsset.tracks(withMediaType: .audio) returned nothing. Instead,
    // ask AVAudioFile to encode AAC into a real .m4a: the file's default
    // processing format (deinterleaved float32) matches the render buffers.
    var outputFile: AVAudioFile
    do {
        let documentsURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
        let outputURL = documentsURL.appendingPathComponent("EDittedFile.m4a")
        // Remove any stale file from a previous run.
        if FileManager.default.fileExists(atPath: outputURL.path) {
            try? FileManager.default.removeItem(at: outputURL)
        }
        let outputSettings: [String: Any] = [
            AVFormatIDKey: kAudioFormatMPEG4AAC,
            AVSampleRateKey: engine.manualRenderingFormat.sampleRate,
            AVNumberOfChannelsKey: engine.manualRenderingFormat.channelCount
        ]
        outputFile = try AVAudioFile(forWriting: outputURL, settings: outputSettings)
    } catch {
        fatalError("Unable to open output audio file: \(error).")
    }

    // Pull rendered audio from the engine until the whole source is consumed.
    while engine.manualRenderingSampleTime < sourceFile.length {
        do {
            let framesLeft = sourceFile.length - engine.manualRenderingSampleTime
            let framesToRender = min(AVAudioFrameCount(framesLeft), buffer.frameCapacity)
            let status = try engine.renderOffline(framesToRender, to: buffer)
            switch status {
            case .success:
                // The data rendered successfully. Write it to the output file.
                try outputFile.write(from: buffer)
            case .insufficientDataFromInputNode:
                // Applicable only when using the input node as one of the sources.
                break
            case .cannotDoInCurrentContext:
                // The engine couldn't render in the current render call.
                // The while loop retries on the next iteration.
                break
            case .error:
                // An error occurred while rendering the audio.
                fatalError("The manual rendering failed.")
            @unknown default:
                // Future-proof against new status cases.
                break
            }
        } catch {
            fatalError("The manual rendering failed: \(error).")
        }
    }

    // Stop playback/rendering before inspecting the result.
    player.stop()
    engine.stop()
    print("finished")

    let asset = AVURLAsset(url: outputFile.url)
    print(asset.tracks(withMediaType: .audio))
}
提前致谢。