Swift - Read two audio files and compute their cross-correlation

Problem Description

(For reading the files I have a reference, plus an Objective-C lead that is missing the implementation but is the closest I have seen in Objective-C so far.)

I'm trying to get a cross-correlation array computed from two audio arrays in a Swift Playground.

The steps I'm trying to perform (which I did easily in Python using scipy.signal.correlate) are, in this order:

sample_index = max_index - (array_length / 2)
delay_in_seconds = sample_index / sample_rate
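
For example, with the 9-element mock correlation array used in the code below ([0, 10, 20, 30, 40, 41, 20, 10, 0]), the center index is 4 and the peak (41) sits at index 5, so sample_index = 5 - 4 = 1; at a sample rate of 1 Hz that corresponds to a delay of 1 second.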

Below is what I've done so far, but I'm stuck at the correlation function: I don't know how to call vDSP_conv or vDSP.correlate.

Personally, I'd prefer vDSP_conv because it's compatible with older Apple devices, but any working script would already be great. I don't even know how to call the vDSP functions with mock arrays, let alone with arrays taken from the audio files.

Also, there is a conceptual difference between convolution and correlation regarding the direction in which the signal slides. I'm not sure how to handle this option in the vDSP_conv function; I guess I should pass the __IF parameter as -1.
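
For reference, a minimal sketch of how the arguments of vDSP_conv line up when called with mock arrays (the names and padding here are illustrative only): a positive filter stride is what makes vDSP_conv correlate rather than convolve, and Apple's documentation describes pointing the filter at its last element with a negative __IF to get convolution instead.

import Accelerate

// Illustrative mock signals; not the audio from the question.
let signal: [Float] = [0, 10, 20, 30, 20, 10, 0]
let kernel: [Float] = [0, 0, 10, 20, 30, 20, 10]

// A full cross-correlation has signal.count + kernel.count - 1 lags, and
// vDSP_conv requires the input to hold resultCount + kernel.count - 1 samples.
let resultCount = signal.count + kernel.count - 1
let pad = [Float](repeating: 0, count: kernel.count - 1)
let paddedSignal = pad + signal + pad

var result = [Float](repeating: 0, count: resultCount)
vDSP_conv(paddedSignal, 1,               // input vector and its stride
          kernel, 1,                     // filter and its stride (+1 -> correlation)
          &result, 1,                    // output vector and its stride
          vDSP_Length(resultCount),      // number of output samples
          vDSP_Length(kernel.count))     // filter length
print(result)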

import AVFoundation
import Accelerate

enum ExampleError: Error {
    case mismatchingSampleRates
}

enum TestError: Error {
    case delayCalculationError
    case zeroPaddingError
}

func main() {
    let time_delay: Float = try! get_delay(filename_1: "metronome", extension_1: ".aif",
                               filename_2: "metronome_100ms_delay", extension_2: ".aiff")
    print(time_delay)
}

func get_delay(filename_1: String, extension_1: String, filename_2: String, extension_2: String) throws -> Float {
    var (audio_1, sr_1) = readAudio(filename: filename_1, file_extension: extension_1)
    var (audio_2, sr_2) = readAudio(filename: filename_2, file_extension: extension_2)
    
    if (sr_1 != sr_2) {
        throw ExampleError.mismatchingSampleRates
    }
    (audio_1, audio_2) = append_zeros_at_shortest(audio_1, audio_2)
    let correlation_array: [Float] = [0, 10, 20, 30, 40, 41, 20, 10, 0] //mock value
    return get_time_at_peak(correlation_array, sample_rate: 1) // mock sample rate
}

func readAudio(filename: String, file_extension: String) -> ([Float], Int) {
    let audioUrl = Bundle.main.url(forResource: filename, withExtension: file_extension)!
    let audioFile = try! AVAudioFile(forReading: audioUrl)
    let audioFileFormat = audioFile.processingFormat
    let audioFileSize = UInt32(audioFile.length)
    let audioBuffer = AVAudioPCMBuffer(pcmFormat: audioFileFormat, frameCapacity: audioFileSize)!
    try! audioFile.read(into: audioBuffer) // not sure why this is needed; read(into:) copies the file's samples into the buffer
    return (Array(UnsafeBufferPointer(start: audioBuffer.floatChannelData![0], count: Int(audioBuffer.frameLength))), Int(audioFile.fileFormat.sampleRate))
}

func append_zeros_at_shortest(_ audio_1: [Float],_ audio_2: [Float]) -> ([Float], [Float]) {
    if audio_1.count == audio_2.count {
        return (audio_1, audio_2)
    }
    if audio_1.count < audio_2.count {
        return (append_zeros(at: audio_1, new_len: audio_2.count), audio_2)
    }
    //if audio_2.count < audio_1.count ...
    return (audio_1, append_zeros(at: audio_2, new_len: audio_1.count))
}

func append_zeros(at old_array: [Float], new_len: Int) -> [Float] {
    var new_array = Array<Float>(repeating: 0, count: new_len)
    new_array[0..<old_array.count] = old_array[0..<old_array.count]
    return new_array
}

func get_time_at_peak(_ array: [Float], sample_rate sr: Int) -> Float {
    // The zero-lag position is the center of the correlation array
    // (for an even count, the center falls between two samples).
    var center_index = Float(array.count / 2)
    if array.count % 2 == 0 {
        center_index -= 0.5
    }
    // Offset of the peak from the center, converted from samples to seconds.
    let max_index = Float(array.indices.first{array[$0] == array.max()}!)
    return (max_index - center_index) / Float(sr)
}


func test_get_time_at_peak() throws -> Bool {
    if get_time_at_peak([0,10, 20, 30, 40, 50, 40, 30, 20, 10, 0], sample_rate: 1) != 0 {
        throw TestError.delayCalculationError
    }
    if get_time_at_peak([0,10, 20, 30, 40, 50, 51, 30, 20, 10, 0], sample_rate: 1) != 1 {
        throw TestError.delayCalculationError
    }
    if get_time_at_peak([0,10, 20, 30, 51, 50, 40, 30, 20, 10, 0], sample_rate: 1) != -1 {
        throw TestError.delayCalculationError
    }
    if get_time_at_peak([0,10, 20, 30, 40, 50, 40, 51, 20, 10, 0], sample_rate: 2) != 1 {
        throw TestError.delayCalculationError
    }
    if get_time_at_peak([0,10, 20, 30, 40, 50, 51, 40, 20, 10, 0], sample_rate: 2) != 0.5 {
        throw TestError.delayCalculationError
    }
    if get_time_at_peak([0, 10, 20, 30, 0, 41, 20, 10], sample_rate: 1) != 1.5 {
        throw TestError.delayCalculationError
    }
    return true
}

func test_append_zeros() throws -> Bool {
    let (v1, v2) = append_zeros_at_shortest([1,2,3], [10,20,30,40,50])
    if v1 != [1,2,3,0,0] {
        throw TestError.zeroPaddingError
    }
    if v2 != [10,20,30,40,50] {
        throw TestError.zeroPaddingError
    }
    return true
}

func call_tests() {
    _ = try! test_get_time_at_peak()
    _ = try! test_append_zeros()
}

call_tests()
main()

Instead of the mock array, I tried to implement the correlation with:

var correlation_array: UnsafeMutablePointer<Float>
vDSP_conv(audio_1, 1, audio_2, 1, correlation_array, 1, vDSP_Length(2 * audio_1.count - 1), vDSP_Length(audio_1.count))

But I get the following error at compile time:

Cannot convert value of type 'UnsafeMutablePointer<Float>' to expected argument type '[Float]'

I also tried:

let correlation_array: [Float] = vDSP.correlate(audio_1, withKernel: audio_2)

But I get the following error at runtime:

error: Execution was interrupted, reason: EXC_BAD_INSTRUCTION (code=EXC_I386_INVOP, subcode=0x0).

And with this one, I get an empty array:

let correlation_array: [Float] = vDSP.correlate([0,10,20,30,20,10,0], withKernel: [0,0,10,20,30,20,10])

Tags: ios, swift, audio, signal-processing, accelerate

Solution


Searching on GitHub, I found the implementation below. I have tested it, and it gives me a delay of 98.97 ms between the two recordings, which is roughly the delay I measured by inspecting the files in Audacity (99 ms).

Now I can see that I should pass a pointer to the resulting float array as a parameter.

In the implementation below there is also zero padding, so that x and y have the same length. After that, x is padded at both the beginning and the end so that it can slide over the y array, following the theory behind the convolution operation.

// Cross-correlation of a signal [x], with another signal [y]. The signal [y]
// is padded so that it is the same length as [x].
public func xcorr(_ x: [Float], _ y: [Float]) -> [Float] {
    precondition(x.count >= y.count, "Input vector [x] must have at least as many elements as [y]")
    var yPadded = y
    if x.count > y.count {
        let padding = repeatElement(Float(0.0), count: x.count - y.count)
        yPadded = y + padding
    }
    
    let resultSize = x.count + yPadded.count - 1
    var result = [Float](repeating: 0, count: resultSize)
    let xPad = repeatElement(Float(0.0), count: yPadded.count-1)
    let xPadded = xPad + x + xPad
    vDSP_conv(xPadded, 1, yPadded, 1, &result, 1, vDSP_Length(resultSize), vDSP_Length(yPadded.count))
    
    return result
}
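
With that in place, the mock correlation in get_delay from the question can be replaced by a real call to xcorr. A sketch, reusing the question's helpers (the zero padding guarantees xcorr's precondition that x has at least as many elements as y):

func get_delay(filename_1: String, extension_1: String,
               filename_2: String, extension_2: String) throws -> Float {
    var (audio_1, sr_1) = readAudio(filename: filename_1, file_extension: extension_1)
    var (audio_2, sr_2) = readAudio(filename: filename_2, file_extension: extension_2)
    if sr_1 != sr_2 {
        throw ExampleError.mismatchingSampleRates
    }
    // Pad the shorter signal with zeros so both have the same length.
    (audio_1, audio_2) = append_zeros_at_shortest(audio_1, audio_2)
    // Real cross-correlation instead of the mock array.
    let correlation_array = xcorr(audio_1, audio_2)
    // The peak's offset from the center of the correlation gives the delay.
    return get_time_at_peak(correlation_array, sample_rate: sr_1)
}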
